Dataset schema, one row per source file:

| Column | Type | Lengths / values | Nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 4 to 1.02M | no |
| ext | string | 8 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 4 to 209 | no |
| max_stars_repo_name | string | length 5 to 121 | no |
| max_stars_repo_head_hexsha | string | length 40 | no |
| max_stars_repo_licenses | sequence | length 1 to 10 | no |
| max_stars_count | int64 | 1 to 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 4 to 209 | no |
| max_issues_repo_name | string | length 5 to 121 | no |
| max_issues_repo_head_hexsha | string | length 40 | no |
| max_issues_repo_licenses | sequence | length 1 to 10 | no |
| max_issues_count | int64 | 1 to 67k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 4 to 209 | no |
| max_forks_repo_name | string | length 5 to 121 | no |
| max_forks_repo_head_hexsha | string | length 40 | no |
| max_forks_repo_licenses | sequence | length 1 to 10 | no |
| max_forks_count | int64 | 1 to 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 4 to 1.02M | no |
| avg_line_length | float64 | 1.07 to 66.1k | no |
| max_line_length | int64 | 4 to 266k | no |
| alphanum_fraction | float64 | 0.01 to 1 | no |
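The rows below are sample records in this schema. As a minimal, hypothetical sketch of how rows like these could be loaded and filtered with the Hugging Face `datasets` library (the dataset path `user/python-code-dump` and the filter thresholds are placeholders, not taken from this dump):

```python
# Minimal sketch, assuming rows with the schema above are published as a
# Hugging Face dataset; "user/python-code-dump" is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train")

# Keep Python files from starred repos with reasonably short lines.
# max_stars_count is nullable, so treat None as 0.
subset = ds.filter(
    lambda row: (row["max_stars_count"] or 0) >= 10
    and row["ext"] == "py"
    and row["max_line_length"] <= 120
)

for row in subset.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```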
283187b1acdcf513705c7326dc53d32846d75468 | 1,054 | py | Python | image.py | harshkothari410/snn-image-segmentation | 18fb28e8b2fee3d7583f6e62fd512ba90863c0ee | ["MIT"] | 7 | 2016-04-17T21:11:41.000Z | 2021-06-25T09:40:40.000Z | image.py | Arthas1121/snn-image-segmentation | 18fb28e8b2fee3d7583f6e62fd512ba90863c0ee | ["MIT"] | null | null | null | image.py | Arthas1121/snn-image-segmentation | 18fb28e8b2fee3d7583f6e62fd512ba90863c0ee | ["MIT"] | 6 | 2016-04-17T19:14:41.000Z | 2022-03-09T21:03:12.000Z |
from PIL import Image
def imageread(filename):
    # Load the image and flatten its pixels into a single list.
    img = Image.open(filename)
    pixel_values = list(img.getdata())

    # Image dimensions: width and height.
    w, h = img.size

    # Build an h x w matrix with one intensity value per pixel.
    pixel_mat = [[0 for _ in range(w)] for _ in range(h)]

    count = 0
    for x in range(h):
        for y in range(w):
            value = pixel_values[count]
            # Multi-channel pixels (e.g. RGB tuples) keep only the first channel;
            # single-channel (greyscale) pixels are stored as-is.
            if isinstance(value, (tuple, list)) and len(value) > 1:
                pixel_mat[x][y] = value[0]
            else:
                pixel_mat[x][y] = value
            count += 1

    return pixel_mat, w, h


def imagewrite(data, w, h):
    # Flatten the matrix back into row-major order and show it
    # as a 1-bit (black and white) image.
    final_ans = []
    for x in range(h):
        for y in range(w):
            final_ans.append(data[x][y])

    im = Image.new('1', (w, h))
    im.putdata(final_ans)
    im.show()


def imagesave(data, w, h, name):
    # Same flattening as imagewrite, but save the result as '<name>.jpg'.
    final_ans = []
    for x in range(h):
        for y in range(w):
            final_ans.append(data[x][y])

    im = Image.new('1', (w, h))
    im.putdata(final_ans)
    im.save(name + '.jpg')
| 19.163636 | 64 | 0.622391 |
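A short usage sketch for the image.py helpers in the row above; the file name 'input.png' and the 128 threshold are hypothetical, not part of the original file:

```python
# Hypothetical usage of imageread/imagewrite/imagesave from image.py above.
pixels, w, h = imageread('input.png')   # 'input.png' is a placeholder path

# Binarise the grey values so they suit the 1-bit output image.
binary = [[255 if pixels[x][y] > 128 else 0 for y in range(w)] for x in range(h)]

imagewrite(binary, w, h)        # opens a viewer window
imagesave(binary, w, h, 'out')  # writes 'out.jpg'
```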
862c998692f1850567159b1010a13f98027238a1 | 2,774 | py | Python | nipype/interfaces/camino/tests/test_auto_TrackPICo.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | ["Apache-2.0"] | 7 | 2017-02-17T08:54:26.000Z | 2022-03-10T20:57:23.000Z | nipype/interfaces/camino/tests/test_auto_TrackPICo.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | ["Apache-2.0"] | 1 | 2016-04-25T15:07:09.000Z | 2016-04-25T15:07:09.000Z | nipype/interfaces/camino/tests/test_auto_TrackPICo.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | ["Apache-2.0"] | 2 | 2017-09-23T16:22:00.000Z | 2019-08-01T14:18:52.000Z |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..dti import TrackPICo


def test_TrackPICo_inputs():
    input_map = dict(
        anisfile=dict(argstr='-anisfile %s', ),
        anisthresh=dict(argstr='-anisthresh %f', ),
        args=dict(argstr='%s', ),
        curveinterval=dict(
            argstr='-curveinterval %f',
            requires=['curvethresh'],
        ),
        curvethresh=dict(argstr='-curvethresh %f', ),
        data_dims=dict(
            argstr='-datadims %s',
            units='voxels',
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        gzip=dict(argstr='-gzip', ),
        ignore_exception=dict(
            deprecated='1.0.0',
            nohash=True,
            usedefault=True,
        ),
        in_file=dict(
            argstr='-inputfile %s',
            position=1,
        ),
        inputdatatype=dict(argstr='-inputdatatype %s', ),
        inputmodel=dict(
            argstr='-inputmodel %s',
            usedefault=True,
        ),
        interpolator=dict(argstr='-interpolator %s', ),
        ipthresh=dict(argstr='-ipthresh %f', ),
        iterations=dict(
            argstr='-iterations %d',
            units='NA',
        ),
        maxcomponents=dict(
            argstr='-maxcomponents %d',
            units='NA',
        ),
        numpds=dict(
            argstr='-numpds %d',
            units='NA',
        ),
        out_file=dict(
            argstr='-outputfile %s',
            genfile=True,
            position=-1,
        ),
        output_root=dict(
            argstr='-outputroot %s',
            position=-1,
        ),
        outputtracts=dict(argstr='-outputtracts %s', ),
        pdf=dict(argstr='-pdf %s', ),
        seed_file=dict(
            argstr='-seedfile %s',
            position=2,
        ),
        stepsize=dict(
            argstr='-stepsize %f',
            requires=['tracker'],
        ),
        terminal_output=dict(
            deprecated='1.0.0',
            nohash=True,
        ),
        tracker=dict(
            argstr='-tracker %s',
            usedefault=True,
        ),
        voxel_dims=dict(
            argstr='-voxeldims %s',
            units='mm',
        ),
    )
    inputs = TrackPICo.input_spec()
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value


def test_TrackPICo_outputs():
    output_map = dict(tracked=dict(), )
    outputs = TrackPICo.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| 28.597938 | 67 | 0.503965 |
0e167608dba640e8e33ffb8e133f56b11ba0dc0a | 10,649 | py | Python | apps/project/business/board.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | ["MIT"] | 349 | 2020-08-04T10:21:01.000Z | 2022-03-23T08:31:29.000Z | apps/project/business/board.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | ["MIT"] | 2 | 2021-01-07T06:17:05.000Z | 2021-04-01T06:01:30.000Z | apps/project/business/board.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | ["MIT"] | 70 | 2020-08-24T06:46:14.000Z | 2022-03-25T13:23:27.000Z |
import json
import requests
from flask import request, g, current_app
from sqlalchemy import desc, func
from sqlalchemy.orm import aliased
from apps.auth.models.users import User
from apps.project.models.issue import Issue
from apps.project.models.modules import Module
from apps.project.models.tasks import Task, TaskCase
from apps.project.models.version import Version
from apps.public.models.public import Config
from library.api.transfer import transfer2json
class BoardBusiness(object):
@classmethod
@transfer2json(
'?id|!name|!description|!tmethod|!ttype|!status|!start_time|!end_time|!priority|!version_id|!version_name'
'|!creator_id|!creator_name|!executor_id|!executor_name|!project_id',
ispagination=True)
def task_query(cls, projectid, userid, status, iscreator, page_size, page_index, title):
# status: 0 = created, 1 = task deleted, 2 = task completed
user_creator = aliased(User)
user_executor = aliased(User)
ret = Task.query.outerjoin(
user_creator, user_creator.id == Task.creator).outerjoin(
user_executor, user_executor.id == Task.executor).outerjoin(
Version, Version.id == Task.version).add_columns(
Task.id.label('id'),
Task.name.label('name'),
Task.description.label('description'),
Task.tmethod.label('tmethod'),
Task.ttype.label('ttype'),
Task.status.label('status'),
func.date_format(Task.start_time, "%Y-%m-%d").label('start_time'),
func.date_format(Task.end_time, "%Y-%m-%d").label('end_time'),
Task.priority.label('priority'),
Task.project_id.label('project_id'),
Version.id.label('version_id'),
Version.title.label('version_name'),
user_creator.id.label('creator_id'),
user_creator.nickname.label('creator_name'),
user_executor.id.label('executor_id'),
user_executor.nickname.label('executor_name'),
)
if projectid:
ret = ret.filter(Task.project_id == projectid)
if iscreator:
ret = ret.filter(Task.creator == userid)
else:
ret = ret.filter(Task.executor == userid)
if title not in ["", None]:
ret = ret.filter(Task.name.like(f'%{title}%'))
ret = ret.filter(Task.status.in_(status))
result = ret.order_by(desc(Task.id)
).limit(int(page_size)).offset(int(page_index - 1) * int(page_size)).all()
count = ret.count()
return result, count
@classmethod
@transfer2json(
'?taskcaseid|!task_id|!executor_id|!executor_name|!handler_id|!handler_name|!exe_way|!cnumber|!ctype|!title|'
'!description|!precondition|!step_result|!is_auto|!status|!comment|!module_id|!module_name|!project_id',
ispagination=True
)
def task_case_query(cls, projectid, userid, status, iscreator, page_size, page_index, title):
# status: 0 = case created, 1 = case deleted, 2 = skipped, 3 = case passed, 4 = case failed
user_executor = aliased(User)
user_handler = aliased(User)
ret = TaskCase.query.outerjoin(
Module, TaskCase.module_id == Module.id).outerjoin(
user_executor, user_executor.id == TaskCase.executor).outerjoin(
user_handler, user_handler.id == TaskCase.handler).add_columns(
TaskCase.id.label('taskcaseid'),
TaskCase.task_id.label('task_id'),
TaskCase.exe_way.label('exe_way'),
TaskCase.cnumber.label('cnumber'),
TaskCase.ctype.label('ctype'),
TaskCase.title.label('title'),
TaskCase.description.label('description'),
TaskCase.precondition.label('precondition'),
TaskCase.step_result.label('step_result'),
TaskCase.is_auto.label('is_auto'),
TaskCase.status.label('status'),
TaskCase.comment.label('comment'),
TaskCase.project_id.label('project_id'),
Module.id.label('module_id'),
Module.name.label('module_name'),
user_executor.id.label('executor_id'),
user_executor.nickname.label('executor_name'),
user_handler.id.label('handler_id'),
user_handler.nickname.label('handler_name'),
)
if projectid:
ret = ret.filter(TaskCase.project_id == projectid)
if iscreator == 1:
ret = ret.filter(TaskCase.handler == userid)
else:
ret = ret.filter(TaskCase.executor == userid)
if title not in ["", None]:
ret = ret.filter(TaskCase.title.like(f'%{title}%'))
ret = ret.filter(TaskCase.status.in_(status))
result = ret.order_by(desc(TaskCase.id)
).limit(int(page_size)).offset(int(page_index - 1) * int(page_size)).all()
count = ret.count()
return result, count
@classmethod
@transfer2json('?id|!issue_number|!title|!handle_status|!description|!chance|!level|!priority|!stage'
'|!version_id|!version_name|!creator_id|!creator_name|!handler_id|!handler_name|!project_id',
ispagination=True
)
def issue_query(cls, projectid, userid, status, iscreator, page_size, page_index, title):
# handle_status: {"1": "to do", "2": "in progress", "3": "in testing", "4": "closed", "5": "rejected", "6": "deferred"}
user_creator = aliased(User)
user_handler = aliased(User)
ret = Issue.query.outerjoin(
user_creator, user_creator.id == Issue.creator).outerjoin(
user_handler, user_handler.id == Issue.handler).outerjoin(
Version, Version.id == Issue.version).add_columns(
Issue.id.label('id'),
Issue.issue_number.label('issue_number'),
Issue.title.label('title'),
Issue.handle_status.label('handle_status'),
Issue.description.label('description'),
Issue.chance.label('chance'),
Issue.level.label('level'),
Issue.priority.label('priority'),
Issue.stage.label('stage'),
Issue.project_id.label('project_id'),
Version.id.label('version_id'),
Version.title.label('version_name'),
user_creator.id.label('creator_id'),
user_creator.nickname.label('creator_name'),
user_handler.id.label('handler_id'),
user_handler.nickname.label('handler_name'),
)
if projectid:
ret = ret.filter(Issue.project_id == projectid)
if iscreator:
ret = ret.filter(Issue.creator == userid)
else:
ret = ret.filter(Issue.handler == userid)
if title not in ["", None]:
ret = ret.filter(Issue.title.like(f'%{title}%'))
ret = ret.filter(Issue.handle_status.in_(status), Issue.status == Issue.ACTIVE)
result = ret.order_by(desc(Issue.id)
).limit(int(page_size)).offset(int(page_index - 1) * int(page_size)).all()
count = ret.count()
return result, count
@classmethod
def board_config(cls):
user_id = g.userid if g.userid else None
board_config = Config.query.add_columns(Config.content.label('content')).filter(Config.module == 'board',
Config.module_type == 1).first()
board_config = json.loads(board_config.content)
current_app.logger.info('board_config:' + str(board_config))
return user_id, board_config
@classmethod
def user_create(cls, page_size, page_index, r_type, title):
project_id = request.args.get('projectid')
user_id, board_config = cls.board_config()
ret = None
count = 0
if r_type == "task":
ret, count = cls.task_query(project_id, user_id, board_config['create']['task'], 1, page_size,
page_index, title)
# task_case_ret = cls.task_case_query(projectid, user_id, board_config['create']['task_case'], 1)
if r_type == "issue":
ret, count = cls.issue_query(project_id, user_id, board_config['create']['issue'], 1, page_size,
page_index, title)
return ret, count
@classmethod
def user_unfinish(cls, page_size, page_index, r_type, title):
project_id = request.args.get('projectid')
user_id, board_config = cls.board_config()
ret = None
count = 0
if r_type == "task":
ret, count = cls.task_query(project_id, user_id, board_config['unfinish']['task'], 0, page_size, page_index,
title)
if r_type == "task_case":
ret, count = cls.task_case_query(project_id, user_id, board_config['unfinish']['task_case'],
1, page_size, page_index, title)
if r_type == "issue":
ret, count = cls.issue_query(project_id, user_id, board_config['unfinish']['issue'], 0, page_size,
page_index, title)
return ret, count
@classmethod
def user_finish(cls, page_size, page_index, r_type, title):
project_id = request.args.get('projectid')
user_id, board_config = cls.board_config()
ret = None
count = 0
if r_type == "task":
ret, count = cls.task_query(project_id, user_id, board_config['finish']['task'], 0, page_size, page_index,
title)
if r_type == "task_case":
ret, count = cls.task_case_query(project_id, user_id, board_config['finish']['task_case'], 1,
page_size, page_index, title)
if r_type == "issue":
ret, count = cls.issue_query(project_id, user_id, board_config['finish']['issue'], 0, page_size,
page_index, title)
return ret, count
@classmethod
def stf_devices(cls):
stf_devices = Config.query.add_columns(Config.content.label('content')).filter(
Config.module == 'stf',
Config.module_type == 1).first()
stf_devices = json.loads(stf_devices.content)
current_app.logger.info(json.dumps(stf_devices, ensure_ascii=False))
url = stf_devices['URL']
headers = stf_devices['headers']
ret = requests.get(url, headers=headers)
ret = json.loads(ret.content)
# logger.info(json.dumps(ret, ensure_ascii=False))
return ret
| 46.70614 | 120 | 0.594892 |
83d30c47ebde1323539e62d03f67b271652cf3be | 4,899 | py | Python | integration_testing/run_travis_tests.py | Glitchfix/mindsdb | e6c33d7085898c223030334962596ae8afa3fbd5 | ["MIT"] | null | null | null | integration_testing/run_travis_tests.py | Glitchfix/mindsdb | e6c33d7085898c223030334962596ae8afa3fbd5 | ["MIT"] | null | null | null | integration_testing/run_travis_tests.py | Glitchfix/mindsdb | e6c33d7085898c223030334962596ae8afa3fbd5 | ["MIT"] | null | null | null |
from data_generators import *
import traceback
import sys
import os
import itertools
import logging
from colorlog import ColoredFormatter
import time
import mindsdb
from mindsdb import CONST
# @TODO: Currently we use this instead of randomly generated data since randomly generated data is not reliable enough
# We tell mindsDB what we want to learn and from what data
mdb = mindsdb.Predictor(name='home_rentals_price')
mdb.learn(
to_predict='rental_price', # the column we want to learn to predict given all the data in the file
from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv"
# the path to the file where we can learn from, (note: can be url)
)
prediction = mdb.predict(when={'sqft':300})
print(prediction[0])
amd = mdb.get_model_data('home_rentals_price')
'''
types_that_work = ['int','float','date','datetime','timestamp','ascii']
logger = None
def setup_testing_logger():
global logger
formatter = ColoredFormatter(
"%(log_color)s%(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'black,bg_white',
'INFO': 'blue,bg_white',
'WARNING': 'orange,bg_white',
'ERROR': 'red,bg_white',
'CRITICAL': 'red,bg_white',
}
)
logger = logging.getLogger('mindsdb_integration_testing')
logger.handlers = []
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def run_tests():
logger.info('Starting one-label test')
separator = ','
train_file_name = 'train_data.csv'
test_file_name = 'test_data.csv'
data_len = 8000
# Create the full dataset
logger.debug(f'Creating one-label test datasets and saving them to {train_file_name} and {test_file_name}, total dataset size will be {data_len} rows')
try:
features = generate_value_cols(types_that_work,data_len, separator)
labels = [generate_labels_2(features, separator)]
feature_headers = list(map(lambda col: col[0], features))
label_headers = list(map(lambda col: col[0], labels))
# Create the training dataset and save it to a file
columns_train = list(map(lambda col: col[1:int(len(col)*3/4)], features))
columns_train.extend(list(map(lambda col: col[1:int(len(col)*3/4)], labels)))
columns_to_file(columns_train, train_file_name, separator, headers=[*feature_headers,*label_headers])
# Create the testing dataset and save it to a file
columns_test = list(map(lambda col: col[int(len(col)*3/4):], features))
columns_to_file(columns_test, test_file_name, separator, headers=feature_headers)
logger.debug(f'Datasets generated and saved to files successfully')
except:
print(traceback.format_exc())
logger.error(f'Failed to generate datasets !')
exit(1)
# Train
mdb = None
try:
mdb = mindsdb.Predictor(name='test_one_label_prediction')
logger.debug(f'Successfully created mindsdb Predictor')
except:
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
mdb.learn(from_data=train_file_name, to_predict=label_headers)
logger.info(f'--------------- Learning ran successfully ---------------')
mdb.learn(from_data=train_file_name, to_predict=label_headers, rebuild_model=False)
logger.info(f'--------------- Additional learning ran successfully ---------------')
except:
print(traceback.format_exc())
logger.error(f'Failed during the training !')
exit(1)
# Predict
try:
mdb = mindsdb.Predictor(name='test_one_label_prediction')
logger.debug(f'Successfully created mindsdb Predictor')
except:
print(traceback.format_exc())
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
results = mdb.predict(when_data=test_file_name)
for row in results:
expect_columns = [label_headers[0] ,label_headers[0] + '_confidence']
for col in expect_columns:
if col not in row:
logger.error(f'Prediction failed to return expected column: {col}')
logger.debug('Got row: {}'.format(row))
exit(1)
logger.info(f'--------------- Predicting ran successfully ---------------')
# Print statements are in for debugging, remove later, but keep the function calls to make sure the interface is working
models = mdb.get_models()
amd = mdb.get_model_data('test_one_label_prediction')
print(amd)
except:
print(traceback.format_exc())
logger.error(f'Failed whilst predicting')
exit(1)
logger.info('Travis CLI Tests ran successfully!')
setup_testing_logger()
run_tests()
'''
| 33.554795 | 154 | 0.654419 |
7a220662c532f6177643a5bc0c91a4955cdccfc8 | 42,389 | py | Python | template_container_human/labels/slice_43.py | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | ["MIT"] | null | null | null | template_container_human/labels/slice_43.py | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | ["MIT"] | null | null | null | template_container_human/labels/slice_43.py | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | ["MIT"] | null | null | null |
coordinates_E0E1E1 = ((124, 121),
(124, 122), (125, 119), (125, 124), (126, 97), (126, 118), (126, 121), (126, 122), (126, 125), (126, 144), (127, 84), (127, 97), (127, 98), (127, 117), (127, 119), (127, 120), (127, 121), (127, 122), (127, 123), (127, 126), (127, 142), (127, 143), (128, 84), (128, 86), (128, 96), (128, 99), (128, 110), (128, 112), (128, 113), (128, 114), (128, 115), (128, 118), (128, 119), (128, 120), (128, 121), (128, 122), (128, 123), (128, 124), (128, 127), (128, 133), (128, 141), (128, 142), (129, 85), (129, 96), (129, 99), (129, 109), (129, 117), (129, 118), (129, 119), (129, 120), (129, 121), (129, 122), (129, 123), (129, 124), (129, 125), (129, 128), (129, 131), (129, 133), (129, 141), (130, 87), (130, 95), (130, 97), (130, 98), (130, 100), (130, 109), (130, 111), (130, 112), (130, 113), (130, 114), (130, 115), (130, 116), (130, 117), (130, 118),
(130, 119), (130, 120), (130, 121), (130, 122), (130, 123), (130, 124), (130, 125), (130, 126), (130, 127), (130, 130), (130, 133), (130, 140), (130, 141), (131, 86), (131, 94), (131, 96), (131, 97), (131, 98), (131, 100), (131, 109), (131, 111), (131, 112), (131, 113), (131, 114), (131, 115), (131, 116), (131, 117), (131, 118), (131, 119), (131, 120), (131, 121), (131, 122), (131, 123), (131, 124), (131, 125), (131, 126), (131, 127), (131, 128), (131, 131), (131, 132), (131, 134), (131, 139), (131, 141), (132, 87), (132, 89), (132, 90), (132, 91), (132, 92), (132, 95), (132, 96), (132, 97), (132, 98), (132, 99), (132, 101), (132, 109), (132, 111), (132, 112), (132, 113), (132, 114), (132, 115), (132, 116), (132, 117), (132, 118), (132, 119), (132, 120), (132, 121), (132, 122), (132, 123), (132, 124), (132, 125), (132, 126), (132, 127),
(132, 128), (132, 129), (132, 130), (132, 131), (132, 132), (132, 133), (132, 135), (132, 136), (132, 137), (132, 141), (133, 87), (133, 94), (133, 95), (133, 96), (133, 97), (133, 98), (133, 99), (133, 100), (133, 102), (133, 109), (133, 111), (133, 112), (133, 113), (133, 114), (133, 115), (133, 116), (133, 117), (133, 118), (133, 119), (133, 120), (133, 121), (133, 122), (133, 123), (133, 124), (133, 125), (133, 126), (133, 127), (133, 128), (133, 129), (133, 130), (133, 131), (133, 132), (133, 133), (133, 134), (133, 139), (133, 141), (134, 87), (134, 89), (134, 90), (134, 91), (134, 92), (134, 93), (134, 94), (134, 95), (134, 96), (134, 97), (134, 98), (134, 99), (134, 100), (134, 101), (134, 103), (134, 108), (134, 110), (134, 111), (134, 112), (134, 113), (134, 114), (134, 115), (134, 116), (134, 117), (134, 118), (134, 119),
(134, 120), (134, 121), (134, 122), (134, 123), (134, 124), (134, 125), (134, 126), (134, 127), (134, 128), (134, 129), (134, 130), (134, 131), (134, 132), (134, 133), (134, 134), (134, 135), (134, 136), (134, 137), (134, 138), (134, 139), (134, 141), (135, 87), (135, 89), (135, 90), (135, 91), (135, 92), (135, 93), (135, 94), (135, 95), (135, 96), (135, 97), (135, 98), (135, 99), (135, 100), (135, 101), (135, 102), (135, 104), (135, 107), (135, 109), (135, 110), (135, 111), (135, 112), (135, 113), (135, 114), (135, 115), (135, 116), (135, 117), (135, 118), (135, 119), (135, 120), (135, 121), (135, 122), (135, 123), (135, 124), (135, 125), (135, 126), (135, 127), (135, 128), (135, 129), (135, 130), (135, 131), (135, 132), (135, 133), (135, 134), (135, 135), (135, 136), (135, 137), (135, 138), (135, 139), (135, 140), (135, 142), (136, 87),
(136, 89), (136, 90), (136, 91), (136, 92), (136, 93), (136, 94), (136, 95), (136, 96), (136, 97), (136, 98), (136, 99), (136, 100), (136, 101), (136, 102), (136, 103), (136, 108), (136, 109), (136, 110), (136, 111), (136, 112), (136, 113), (136, 114), (136, 115), (136, 116), (136, 117), (136, 118), (136, 119), (136, 120), (136, 121), (136, 122), (136, 123), (136, 124), (136, 125), (136, 126), (136, 127), (136, 128), (136, 129), (136, 130), (136, 131), (136, 132), (136, 133), (136, 134), (136, 135), (136, 136), (136, 137), (136, 138), (136, 139), (136, 140), (136, 141), (136, 145), (137, 80), (137, 82), (137, 83), (137, 84), (137, 85), (137, 88), (137, 89), (137, 90), (137, 91), (137, 92), (137, 93), (137, 94), (137, 95), (137, 96), (137, 97), (137, 98), (137, 99), (137, 100), (137, 101), (137, 102), (137, 103), (137, 104),
(137, 105), (137, 106), (137, 107), (137, 108), (137, 109), (137, 110), (137, 111), (137, 112), (137, 113), (137, 114), (137, 115), (137, 116), (137, 117), (137, 118), (137, 119), (137, 120), (137, 121), (137, 122), (137, 123), (137, 124), (137, 125), (137, 126), (137, 127), (137, 128), (137, 129), (137, 130), (137, 131), (137, 132), (137, 133), (137, 134), (137, 135), (137, 136), (137, 137), (137, 138), (137, 139), (137, 140), (137, 141), (137, 144), (138, 79), (138, 81), (138, 82), (138, 83), (138, 84), (138, 85), (138, 86), (138, 87), (138, 89), (138, 90), (138, 91), (138, 92), (138, 93), (138, 101), (138, 102), (138, 103), (138, 104), (138, 105), (138, 106), (138, 107), (138, 108), (138, 109), (138, 110), (138, 111), (138, 112), (138, 113), (138, 114), (138, 115), (138, 116), (138, 117), (138, 118), (138, 119), (138, 120), (138, 121),
(138, 122), (138, 123), (138, 124), (138, 125), (138, 126), (138, 127), (138, 128), (138, 129), (138, 130), (138, 131), (138, 132), (138, 133), (138, 134), (138, 135), (138, 136), (138, 137), (138, 138), (138, 139), (138, 143), (139, 89), (139, 90), (139, 91), (139, 94), (139, 95), (139, 96), (139, 97), (139, 98), (139, 99), (139, 100), (139, 102), (139, 103), (139, 104), (139, 105), (139, 106), (139, 107), (139, 108), (139, 109), (139, 110), (139, 111), (139, 112), (139, 113), (139, 114), (139, 115), (139, 116), (139, 117), (139, 118), (139, 119), (139, 120), (139, 121), (139, 122), (139, 123), (139, 124), (139, 125), (139, 126), (139, 127), (139, 128), (139, 129), (139, 130), (139, 131), (139, 132), (139, 133), (139, 134), (139, 135), (139, 136), (139, 137), (139, 138), (139, 141), (140, 89), (140, 92), (140, 101), (140, 103), (140, 104),
(140, 105), (140, 106), (140, 107), (140, 108), (140, 109), (140, 110), (140, 111), (140, 112), (140, 113), (140, 114), (140, 115), (140, 116), (140, 117), (140, 118), (140, 119), (140, 120), (140, 121), (140, 122), (140, 123), (140, 124), (140, 125), (140, 126), (140, 127), (140, 128), (140, 129), (140, 130), (140, 131), (140, 132), (140, 133), (140, 134), (140, 135), (140, 136), (140, 139), (141, 89), (141, 91), (141, 102), (141, 104), (141, 105), (141, 106), (141, 107), (141, 108), (141, 109), (141, 110), (141, 111), (141, 112), (141, 113), (141, 114), (141, 115), (141, 116), (141, 117), (141, 118), (141, 119), (141, 120), (141, 121), (141, 122), (141, 123), (141, 124), (141, 125), (141, 126), (141, 127), (141, 128), (141, 129), (141, 130), (141, 131), (141, 132), (141, 133), (141, 134), (141, 135), (141, 138), (142, 89), (142, 91), (142, 102),
(142, 104), (142, 105), (142, 106), (142, 107), (142, 108), (142, 109), (142, 110), (142, 111), (142, 112), (142, 113), (142, 114), (142, 115), (142, 116), (142, 117), (142, 118), (142, 119), (142, 120), (142, 121), (142, 122), (142, 123), (142, 124), (142, 125), (142, 126), (142, 127), (142, 128), (142, 129), (142, 130), (142, 131), (142, 132), (142, 133), (142, 134), (142, 136), (143, 89), (143, 102), (143, 104), (143, 105), (143, 106), (143, 107), (143, 108), (143, 109), (143, 110), (143, 111), (143, 112), (143, 113), (143, 114), (143, 115), (143, 116), (143, 117), (143, 118), (143, 119), (143, 120), (143, 121), (143, 122), (143, 123), (143, 124), (143, 125), (143, 126), (143, 127), (143, 128), (143, 129), (143, 130), (143, 131), (143, 132), (143, 133), (143, 135), (144, 88), (144, 89), (144, 102), (144, 104), (144, 105), (144, 106), (144, 107),
(144, 108), (144, 109), (144, 110), (144, 111), (144, 112), (144, 113), (144, 114), (144, 115), (144, 116), (144, 117), (144, 118), (144, 119), (144, 120), (144, 121), (144, 122), (144, 123), (144, 124), (144, 125), (144, 126), (144, 127), (144, 128), (144, 129), (144, 130), (144, 134), (145, 88), (145, 102), (145, 104), (145, 105), (145, 106), (145, 107), (145, 108), (145, 109), (145, 110), (145, 111), (145, 112), (145, 113), (145, 114), (145, 115), (145, 116), (145, 117), (145, 118), (145, 119), (145, 120), (145, 121), (145, 122), (145, 123), (145, 124), (145, 125), (145, 126), (145, 127), (145, 128), (145, 131), (145, 134), (146, 87), (146, 101), (146, 103), (146, 104), (146, 105), (146, 106), (146, 107), (146, 108), (146, 109), (146, 110), (146, 111), (146, 112), (146, 113), (146, 114), (146, 115), (146, 116), (146, 117), (146, 118), (146, 119),
(146, 120), (146, 121), (146, 122), (146, 123), (146, 124), (146, 125), (146, 129), (146, 130), (146, 134), (147, 86), (147, 87), (147, 101), (147, 103), (147, 104), (147, 105), (147, 106), (147, 107), (147, 108), (147, 109), (147, 110), (147, 111), (147, 112), (147, 113), (147, 114), (147, 115), (147, 116), (147, 117), (147, 118), (147, 119), (147, 120), (147, 121), (147, 122), (147, 123), (147, 124), (147, 125), (147, 127), (147, 134), (148, 85), (148, 100), (148, 102), (148, 103), (148, 104), (148, 105), (148, 106), (148, 107), (148, 108), (148, 109), (148, 110), (148, 111), (148, 112), (148, 113), (148, 114), (148, 115), (148, 116), (148, 117), (148, 118), (148, 119), (148, 120), (148, 121), (148, 122), (148, 123), (148, 125), (148, 135), (149, 99), (149, 101), (149, 102), (149, 103), (149, 105), (149, 106), (149, 107), (149, 108), (149, 109),
(149, 110), (149, 111), (149, 112), (149, 113), (149, 114), (149, 115), (149, 116), (149, 117), (149, 118), (149, 119), (149, 120), (149, 121), (149, 122), (149, 125), (149, 136), (149, 139), (150, 99), (150, 101), (150, 102), (150, 104), (150, 106), (150, 107), (150, 108), (150, 109), (150, 110), (150, 111), (150, 112), (150, 113), (150, 114), (150, 115), (150, 116), (150, 117), (150, 118), (150, 119), (150, 120), (150, 121), (150, 124), (150, 139), (151, 98), (151, 100), (151, 101), (151, 103), (151, 107), (151, 108), (151, 109), (151, 110), (151, 111), (151, 112), (151, 115), (151, 116), (151, 117), (151, 118), (151, 119), (151, 120), (151, 121), (151, 122), (151, 124), (151, 137), (151, 139), (152, 98), (152, 99), (152, 100), (152, 102), (152, 106), (152, 108), (152, 109), (152, 110), (152, 111), (152, 113), (152, 114), (152, 116), (152, 117),
(152, 118), (152, 119), (152, 121), (152, 138), (153, 97), (153, 99), (153, 101), (153, 107), (153, 109), (153, 110), (153, 112), (153, 115), (153, 117), (153, 118), (153, 120), (154, 96), (154, 98), (154, 100), (154, 107), (154, 109), (154, 111), (154, 116), (154, 118), (154, 120), (155, 95), (155, 97), (155, 99), (155, 107), (155, 110), (155, 117), (155, 120), (156, 94), (156, 97), (156, 99), (156, 107), (156, 109), (156, 117), (156, 120), (157, 93), (157, 95), (157, 98), (157, 107), (157, 118), (157, 121), (158, 92), (158, 94), (158, 97), (158, 98), (158, 107), (158, 108), (158, 118), (158, 121), (159, 107), (159, 118), (159, 121), (160, 106), (160, 107), (160, 119), (160, 121), (160, 132), (161, 106), (161, 119), (161, 120), (161, 132), (162, 106), (162, 119), (162, 120), (162, 132), (163, 106), (163, 119), (163, 132), (164, 106),
(164, 118), (164, 119), (164, 132), (165, 106), (165, 118), (165, 119), (165, 132), (166, 118), (166, 119), (167, 118), (167, 119), (168, 117), (168, 119), (169, 117), (169, 119), (170, 117), (170, 119), )
coordinates_E1E1E1 = ((76, 121),
(76, 122), (77, 120), (77, 122), (78, 120), (78, 122), (79, 120), (79, 122), (80, 105), (80, 120), (80, 123), (81, 105), (81, 119), (81, 121), (81, 122), (81, 123), (81, 127), (81, 129), (82, 105), (82, 119), (82, 121), (82, 122), (82, 123), (82, 125), (82, 126), (82, 128), (83, 105), (83, 119), (83, 121), (83, 122), (83, 123), (83, 124), (83, 127), (84, 105), (84, 119), (84, 121), (84, 122), (84, 123), (84, 124), (84, 126), (85, 104), (85, 106), (85, 119), (85, 121), (85, 122), (85, 123), (85, 124), (85, 126), (86, 103), (86, 106), (86, 118), (86, 120), (86, 121), (86, 122), (86, 123), (86, 125), (87, 102), (87, 106), (87, 118), (87, 120), (87, 121), (87, 122), (87, 123), (87, 125), (88, 92), (88, 101), (88, 104), (88, 105), (88, 107), (88, 118), (88, 120), (88, 121), (88, 122),
(88, 124), (88, 136), (89, 91), (89, 100), (89, 103), (89, 104), (89, 105), (89, 107), (89, 117), (89, 119), (89, 120), (89, 121), (89, 122), (89, 124), (89, 135), (89, 136), (90, 91), (90, 93), (90, 99), (90, 101), (90, 104), (90, 105), (90, 106), (90, 108), (90, 117), (90, 119), (90, 120), (90, 121), (90, 123), (90, 134), (90, 136), (91, 90), (91, 92), (91, 94), (91, 98), (91, 99), (91, 102), (91, 104), (91, 105), (91, 106), (91, 107), (91, 109), (91, 116), (91, 118), (91, 119), (91, 120), (91, 122), (91, 133), (91, 135), (91, 137), (92, 93), (92, 96), (92, 98), (92, 105), (92, 106), (92, 107), (92, 108), (92, 110), (92, 111), (92, 112), (92, 113), (92, 114), (92, 117), (92, 118), (92, 119), (92, 121), (92, 132), (92, 134), (92, 135), (92, 137), (93, 94), (93, 97),
(93, 104), (93, 106), (93, 107), (93, 108), (93, 109), (93, 112), (93, 113), (93, 116), (93, 117), (93, 118), (93, 119), (93, 121), (93, 132), (93, 134), (93, 135), (93, 136), (93, 138), (94, 105), (94, 107), (94, 108), (94, 109), (94, 110), (94, 111), (94, 112), (94, 113), (94, 114), (94, 115), (94, 116), (94, 117), (94, 118), (94, 119), (94, 121), (94, 132), (94, 134), (94, 135), (94, 136), (94, 137), (95, 106), (95, 108), (95, 109), (95, 110), (95, 111), (95, 112), (95, 113), (95, 114), (95, 115), (95, 116), (95, 117), (95, 118), (95, 119), (95, 121), (95, 132), (95, 134), (95, 135), (95, 136), (95, 139), (96, 106), (96, 108), (96, 109), (96, 110), (96, 111), (96, 112), (96, 113), (96, 114), (96, 115), (96, 116), (96, 117), (96, 118), (96, 119), (96, 120), (96, 122), (96, 131),
(96, 133), (96, 134), (96, 135), (96, 137), (97, 106), (97, 108), (97, 109), (97, 110), (97, 111), (97, 112), (97, 113), (97, 114), (97, 115), (97, 116), (97, 117), (97, 118), (97, 119), (97, 120), (97, 122), (97, 130), (97, 132), (97, 133), (97, 134), (97, 136), (98, 105), (98, 107), (98, 108), (98, 109), (98, 110), (98, 111), (98, 112), (98, 113), (98, 114), (98, 115), (98, 116), (98, 117), (98, 118), (98, 119), (98, 120), (98, 121), (98, 122), (98, 124), (98, 125), (98, 126), (98, 127), (98, 128), (98, 131), (98, 132), (98, 133), (98, 134), (98, 136), (99, 103), (99, 106), (99, 107), (99, 108), (99, 109), (99, 110), (99, 111), (99, 112), (99, 113), (99, 114), (99, 115), (99, 116), (99, 117), (99, 118), (99, 119), (99, 120), (99, 121), (99, 122), (99, 130), (99, 131), (99, 132),
(99, 133), (99, 135), (100, 101), (100, 105), (100, 106), (100, 107), (100, 108), (100, 109), (100, 110), (100, 111), (100, 112), (100, 113), (100, 114), (100, 115), (100, 116), (100, 117), (100, 118), (100, 119), (100, 120), (100, 121), (100, 122), (100, 123), (100, 124), (100, 125), (100, 126), (100, 127), (100, 128), (100, 129), (100, 130), (100, 131), (100, 132), (100, 133), (100, 135), (101, 88), (101, 90), (101, 98), (101, 100), (101, 103), (101, 104), (101, 105), (101, 106), (101, 107), (101, 108), (101, 109), (101, 110), (101, 111), (101, 112), (101, 113), (101, 114), (101, 115), (101, 116), (101, 117), (101, 118), (101, 119), (101, 120), (101, 121), (101, 122), (101, 123), (101, 124), (101, 125), (101, 126), (101, 127), (101, 128), (101, 129), (101, 130), (101, 131), (101, 132), (101, 133), (101, 135), (102, 85), (102, 87), (102, 88),
(102, 91), (102, 92), (102, 93), (102, 94), (102, 95), (102, 96), (102, 97), (102, 101), (102, 102), (102, 103), (102, 104), (102, 105), (102, 106), (102, 107), (102, 108), (102, 109), (102, 110), (102, 111), (102, 112), (102, 113), (102, 114), (102, 115), (102, 116), (102, 117), (102, 118), (102, 119), (102, 120), (102, 121), (102, 122), (102, 123), (102, 124), (102, 125), (102, 126), (102, 127), (102, 128), (102, 129), (102, 130), (102, 131), (102, 132), (102, 133), (102, 135), (103, 84), (103, 90), (103, 98), (103, 99), (103, 100), (103, 101), (103, 102), (103, 103), (103, 104), (103, 105), (103, 106), (103, 107), (103, 108), (103, 109), (103, 110), (103, 111), (103, 112), (103, 113), (103, 114), (103, 115), (103, 116), (103, 117), (103, 118), (103, 119), (103, 120), (103, 121), (103, 122), (103, 123), (103, 124), (103, 125), (103, 126),
(103, 127), (103, 128), (103, 129), (103, 130), (103, 131), (103, 132), (103, 133), (103, 134), (103, 136), (104, 91), (104, 93), (104, 94), (104, 95), (104, 96), (104, 97), (104, 98), (104, 99), (104, 100), (104, 101), (104, 102), (104, 103), (104, 104), (104, 105), (104, 106), (104, 107), (104, 108), (104, 109), (104, 110), (104, 111), (104, 112), (104, 113), (104, 114), (104, 115), (104, 116), (104, 117), (104, 118), (104, 119), (104, 120), (104, 121), (104, 122), (104, 123), (104, 124), (104, 125), (104, 126), (104, 127), (104, 128), (104, 129), (104, 130), (104, 131), (104, 132), (104, 133), (104, 134), (104, 135), (104, 138), (104, 139), (104, 141), (105, 91), (105, 93), (105, 94), (105, 95), (105, 96), (105, 97), (105, 98), (105, 99), (105, 100), (105, 101), (105, 102), (105, 103), (105, 108), (105, 109), (105, 110), (105, 111),
(105, 112), (105, 113), (105, 114), (105, 115), (105, 116), (105, 117), (105, 118), (105, 119), (105, 120), (105, 121), (105, 122), (105, 123), (105, 124), (105, 125), (105, 126), (105, 127), (105, 128), (105, 129), (105, 130), (105, 131), (105, 132), (105, 133), (105, 134), (105, 135), (105, 136), (105, 142), (106, 91), (106, 93), (106, 94), (106, 95), (106, 96), (106, 97), (106, 98), (106, 99), (106, 100), (106, 104), (106, 105), (106, 106), (106, 107), (106, 110), (106, 111), (106, 112), (106, 113), (106, 114), (106, 115), (106, 116), (106, 117), (106, 118), (106, 119), (106, 120), (106, 121), (106, 122), (106, 123), (106, 124), (106, 125), (106, 126), (106, 127), (106, 128), (106, 129), (106, 130), (106, 131), (106, 132), (106, 133), (106, 134), (106, 135), (106, 136), (106, 137), (106, 138), (106, 139), (106, 140), (106, 142), (107, 91),
(107, 93), (107, 94), (107, 95), (107, 96), (107, 97), (107, 98), (107, 101), (107, 102), (107, 103), (107, 108), (107, 111), (107, 112), (107, 113), (107, 114), (107, 115), (107, 116), (107, 117), (107, 118), (107, 119), (107, 120), (107, 121), (107, 122), (107, 123), (107, 124), (107, 125), (107, 126), (107, 127), (107, 128), (107, 129), (107, 130), (107, 131), (107, 132), (107, 133), (107, 134), (107, 135), (107, 136), (107, 137), (107, 138), (107, 139), (107, 141), (108, 91), (108, 93), (108, 94), (108, 95), (108, 96), (108, 97), (108, 100), (108, 110), (108, 112), (108, 113), (108, 114), (108, 115), (108, 116), (108, 117), (108, 118), (108, 119), (108, 120), (108, 121), (108, 122), (108, 123), (108, 124), (108, 125), (108, 126), (108, 127), (108, 128), (108, 129), (108, 130), (108, 131), (108, 132), (108, 133), (108, 134), (108, 135),
(108, 136), (108, 137), (108, 138), (108, 140), (109, 90), (109, 92), (109, 93), (109, 94), (109, 95), (109, 96), (109, 98), (109, 111), (109, 113), (109, 114), (109, 115), (109, 116), (109, 117), (109, 118), (109, 119), (109, 120), (109, 121), (109, 122), (109, 123), (109, 124), (109, 125), (109, 126), (109, 127), (109, 128), (109, 129), (109, 130), (109, 131), (109, 132), (109, 133), (109, 134), (109, 135), (109, 136), (109, 137), (109, 138), (109, 140), (110, 90), (110, 92), (110, 93), (110, 94), (110, 95), (110, 97), (110, 111), (110, 113), (110, 114), (110, 115), (110, 116), (110, 117), (110, 118), (110, 119), (110, 120), (110, 121), (110, 122), (110, 123), (110, 124), (110, 125), (110, 126), (110, 127), (110, 128), (110, 129), (110, 130), (110, 131), (110, 132), (110, 134), (110, 135), (110, 136), (110, 137), (110, 138), (110, 140),
(111, 89), (111, 91), (111, 92), (111, 93), (111, 94), (111, 96), (111, 111), (111, 113), (111, 114), (111, 115), (111, 116), (111, 117), (111, 118), (111, 119), (111, 120), (111, 121), (111, 122), (111, 123), (111, 124), (111, 125), (111, 126), (111, 127), (111, 128), (111, 129), (111, 130), (111, 133), (111, 136), (111, 137), (111, 138), (111, 140), (112, 88), (112, 90), (112, 94), (112, 96), (112, 111), (112, 113), (112, 114), (112, 115), (112, 116), (112, 117), (112, 118), (112, 119), (112, 120), (112, 121), (112, 122), (112, 123), (112, 124), (112, 125), (112, 126), (112, 127), (112, 128), (112, 129), (112, 131), (112, 134), (112, 137), (112, 140), (113, 77), (113, 82), (113, 83), (113, 84), (113, 85), (113, 86), (113, 87), (113, 89), (113, 91), (113, 92), (113, 96), (113, 111), (113, 113), (113, 114), (113, 115), (113, 117),
(113, 118), (113, 119), (113, 120), (113, 121), (113, 122), (113, 123), (113, 124), (113, 125), (113, 126), (113, 127), (113, 128), (113, 130), (113, 136), (113, 138), (113, 140), (114, 77), (114, 79), (114, 80), (114, 83), (114, 90), (114, 94), (114, 95), (114, 111), (114, 116), (114, 119), (114, 120), (114, 121), (114, 122), (114, 123), (114, 124), (114, 125), (114, 126), (114, 127), (114, 129), (114, 137), (115, 77), (115, 84), (115, 89), (115, 95), (115, 111), (115, 113), (115, 114), (115, 118), (115, 120), (115, 121), (115, 122), (115, 123), (115, 129), (115, 137), (116, 77), (116, 85), (116, 88), (116, 111), (116, 119), (116, 121), (116, 122), (116, 125), (116, 126), (116, 128), (117, 119), (117, 121), (117, 123), (118, 120), (118, 123), (119, 120), (119, 122), (120, 121), )
coordinates_FEDAB9 = ((126, 79),
(126, 80), (127, 78), (127, 82), (128, 77), (128, 79), (128, 80), (128, 82), (129, 76), (129, 78), (129, 79), (129, 80), (129, 81), (129, 83), (130, 75), (130, 77), (130, 78), (130, 79), (130, 80), (130, 81), (130, 83), (131, 75), (131, 77), (131, 78), (131, 79), (131, 80), (131, 81), (131, 82), (131, 84), (132, 76), (132, 78), (132, 79), (132, 80), (132, 81), (132, 82), (132, 84), (133, 76), (133, 78), (133, 79), (133, 80), (133, 81), (133, 82), (133, 83), (133, 85), (134, 76), (134, 78), (134, 85), (135, 76), (135, 80), (135, 81), (135, 82), (135, 83), (135, 85), (136, 75), (136, 78), (137, 75), (137, 77), (137, 78), (138, 74), (138, 77), (139, 74), (139, 77), (140, 74), (140, 76), (140, 77), (140, 78), (140, 79), (140, 80), (140, 81), (140, 82), (140, 83), (140, 84), (140, 85),
(140, 87), (141, 75), (141, 82), (141, 83), (141, 84), (141, 85), (141, 87), (141, 94), (141, 96), (141, 97), (141, 98), (141, 100), (142, 77), (142, 78), (142, 80), (142, 93), )
coordinates_D970D6 = ((123, 83),
(124, 79), (124, 81), (124, 85), (124, 86), (124, 87), (124, 88), (124, 89), (124, 90), (125, 81), (125, 83), (125, 84), (125, 90), (126, 86), (126, 88), (126, 90), (127, 88), (127, 90), (128, 88), (128, 90), (129, 89), (130, 89), )
coordinates_01CED1 = ((143, 82),
(143, 84), (143, 86), (143, 97), (143, 98), (143, 100), (144, 86), (144, 91), (144, 93), (144, 94), (144, 95), (144, 96), (144, 100), (145, 80), (145, 82), (145, 83), (145, 85), (145, 90), (145, 97), (145, 99), (146, 79), (146, 82), (146, 84), (146, 90), (146, 92), (146, 93), (146, 94), (146, 95), (146, 96), (146, 97), (146, 99), (147, 78), (147, 80), (147, 81), (147, 83), (147, 89), (147, 91), (147, 92), (147, 93), (147, 94), (147, 95), (147, 96), (147, 98), (148, 78), (148, 80), (148, 81), (148, 83), (148, 88), (148, 90), (148, 91), (148, 92), (148, 93), (148, 94), (148, 95), (148, 96), (148, 98), (149, 79), (149, 81), (149, 83), (149, 86), (149, 87), (149, 89), (149, 90), (149, 91), (149, 92), (149, 93), (149, 94), (149, 95), (149, 97), (150, 80), (150, 82), (150, 83), (150, 84),
(150, 85), (150, 92), (150, 93), (150, 94), (150, 96), (151, 81), (151, 89), (151, 92), (151, 94), (151, 96), (152, 82), (152, 84), (152, 85), (152, 87), (152, 92), (152, 95), (153, 91), (153, 94), (153, 103), (153, 105), (154, 91), (154, 93), (154, 105), (155, 90), (155, 92), (155, 102), (155, 105), (155, 112), (156, 89), (156, 91), (156, 101), (156, 103), (156, 105), (156, 113), (157, 88), (157, 90), (157, 101), (157, 103), (157, 105), (157, 111), (157, 113), (158, 87), (158, 90), (158, 100), (158, 102), (158, 103), (158, 104), (158, 105), (158, 110), (158, 113), (159, 87), (159, 90), (159, 99), (159, 101), (159, 102), (159, 104), (159, 110), (159, 113), (160, 87), (160, 88), (160, 89), (160, 90), (160, 91), (160, 92), (160, 93), (160, 94), (160, 95), (160, 96), (160, 97), (160, 100), (160, 101), (160, 102),
(160, 104), (160, 109), (160, 111), (160, 113), (161, 87), (161, 89), (161, 90), (161, 100), (161, 101), (161, 102), (161, 104), (161, 109), (161, 110), (161, 111), (161, 113), (162, 90), (162, 91), (162, 92), (162, 93), (162, 94), (162, 95), (162, 96), (162, 97), (162, 98), (162, 99), (162, 100), (162, 101), (162, 102), (162, 104), (162, 108), (162, 110), (162, 111), (162, 113), (163, 88), (163, 96), (163, 100), (163, 101), (163, 102), (163, 104), (163, 108), (163, 110), (163, 111), (163, 113), (164, 90), (164, 92), (164, 93), (164, 94), (164, 95), (164, 96), (164, 97), (164, 98), (164, 101), (164, 102), (164, 104), (164, 108), (164, 110), (164, 111), (164, 113), (165, 100), (165, 102), (165, 104), (165, 108), (165, 110), (165, 111), (165, 113), (166, 101), (166, 104), (166, 105), (166, 108), (166, 110), (166, 111), (166, 113),
(167, 101), (167, 103), (167, 105), (167, 109), (167, 111), (167, 113), (168, 101), (168, 103), (168, 104), (168, 106), (168, 109), (168, 111), (168, 113), (169, 102), (169, 104), (169, 105), (169, 108), (169, 109), (169, 110), (169, 112), (170, 103), (170, 106), (170, 109), (170, 110), (170, 112), (171, 104), (171, 107), (171, 108), (171, 109), (171, 111), (172, 106), (172, 110), (173, 107), (173, 109), )
coordinates_FE3E96 = ((121, 99),
(121, 100), (121, 101), (121, 102), (122, 94), (122, 95), (122, 96), (122, 97), (122, 98), (122, 104), (122, 105), (122, 132), (122, 133), (122, 134), (122, 136), (123, 93), (123, 99), (123, 100), (123, 101), (123, 102), (123, 103), (123, 107), (123, 108), (123, 109), (123, 110), (123, 111), (123, 112), (123, 113), (123, 114), (123, 115), (123, 116), (123, 117), (123, 119), (123, 124), (123, 126), (123, 127), (123, 128), (123, 129), (123, 130), (123, 131), (123, 136), (124, 93), (124, 95), (124, 97), (124, 100), (124, 101), (124, 102), (124, 103), (124, 104), (124, 105), (124, 106), (124, 118), (124, 125), (124, 132), (124, 136), (125, 92), (125, 94), (125, 96), (125, 99), (125, 101), (125, 102), (125, 103), (125, 104), (125, 105), (125, 106), (125, 107), (125, 108), (125, 111), (125, 112), (125, 117), (125, 126), (125, 128), (125, 129),
(125, 130), (125, 131), (125, 134), (125, 136), (126, 92), (126, 95), (126, 100), (126, 102), (126, 103), (126, 104), (126, 105), (126, 106), (126, 107), (126, 110), (126, 113), (126, 115), (126, 127), (126, 132), (126, 136), (127, 92), (127, 100), (127, 102), (127, 103), (127, 104), (127, 105), (127, 106), (127, 108), (127, 128), (127, 131), (127, 135), (127, 136), (128, 92), (128, 94), (128, 101), (128, 103), (128, 104), (128, 105), (128, 107), (128, 130), (128, 135), (128, 136), (129, 92), (129, 93), (129, 102), (129, 104), (129, 105), (129, 107), (129, 135), (129, 136), (130, 92), (130, 102), (130, 104), (130, 105), (130, 107), (130, 135), (131, 103), (131, 105), (131, 107), (132, 103), (132, 107), (133, 104), (133, 106), )
coordinates_AF3060 = ((123, 146),
(123, 148), (123, 149), (123, 151), (124, 145), (124, 151), (125, 145), (125, 147), (125, 151), (126, 146), (126, 149), (127, 146), (128, 145), (128, 147), (129, 144), (129, 146), (130, 143), (130, 145), (131, 143), (131, 144), (132, 143), )
coordinates_ACFF2F = ((128, 149),
(128, 152), (129, 148), (129, 152), (130, 147), (130, 149), (130, 150), (130, 152), (131, 146), (131, 149), (131, 150), (131, 152), (132, 148), (132, 149), (132, 150), (132, 152), (133, 147), (133, 148), (133, 149), (133, 150), (133, 152), (134, 143), (134, 148), (134, 149), (134, 150), (134, 152), (135, 148), (135, 149), (136, 147), (136, 149), (136, 151), (137, 147), (137, 150), (138, 146), (138, 149), (139, 145), (139, 148), (140, 143), (140, 146), (140, 148), (141, 141), (141, 147), (142, 139), (142, 143), (142, 144), (142, 145), (142, 147), (143, 138), (143, 141), (143, 142), (143, 147), (144, 139), )
coordinates_FFDAB9 = ((109, 74),
(109, 76), (109, 86), (109, 88), (110, 74), (110, 78), (110, 83), (110, 84), (110, 85), (111, 73), (111, 75), (111, 77), (111, 80), (111, 81), (111, 82), (111, 83), (111, 84), (111, 85), (111, 87), (112, 73), (112, 75), (112, 79), (113, 72), (113, 75), (114, 72), (114, 75), (115, 72), (115, 75), (116, 72), (116, 75), (117, 72), (117, 75), (118, 73), (118, 74), )
coordinates_DA70D6 = ((116, 80),
(116, 82), (116, 83), (116, 90), (117, 79), (117, 84), (117, 89), (117, 90), (118, 80), (118, 81), (118, 82), (118, 85), (118, 88), (118, 90), (119, 76), (119, 78), (119, 83), (119, 84), (119, 87), (119, 90), (120, 79), (120, 80), (120, 81), (120, 82), (120, 90), (121, 84), (121, 85), (121, 86), (121, 87), (121, 88), (121, 90), )
coordinates_00CED1 = ((74, 101),
(74, 103), (74, 104), (74, 105), (75, 99), (75, 107), (75, 108), (76, 98), (76, 101), (76, 102), (76, 103), (76, 104), (76, 105), (76, 106), (76, 108), (77, 97), (77, 99), (77, 100), (77, 101), (77, 102), (77, 103), (77, 106), (77, 107), (77, 109), (78, 98), (78, 100), (78, 101), (78, 102), (78, 103), (78, 104), (78, 105), (78, 106), (78, 107), (78, 109), (79, 98), (79, 100), (79, 101), (79, 103), (79, 107), (79, 110), (80, 92), (80, 93), (80, 99), (80, 101), (80, 103), (80, 107), (80, 110), (81, 91), (81, 95), (81, 98), (81, 100), (81, 101), (81, 103), (81, 107), (81, 110), (82, 90), (82, 93), (82, 94), (82, 97), (82, 98), (82, 99), (82, 100), (82, 101), (82, 103), (82, 107), (82, 109), (82, 111), (83, 90), (83, 92), (83, 93), (83, 94), (83, 95), (83, 98),
(83, 99), (83, 100), (83, 101), (83, 103), (83, 107), (83, 108), (83, 111), (84, 89), (84, 91), (84, 93), (84, 94), (84, 95), (84, 96), (84, 97), (84, 98), (84, 99), (84, 100), (84, 101), (84, 103), (84, 108), (84, 110), (84, 112), (85, 89), (85, 94), (85, 95), (85, 96), (85, 97), (85, 98), (85, 99), (85, 100), (85, 102), (85, 108), (85, 110), (85, 111), (85, 113), (86, 88), (86, 90), (86, 94), (86, 95), (86, 96), (86, 97), (86, 98), (86, 99), (86, 101), (86, 108), (86, 110), (86, 111), (86, 113), (87, 88), (87, 90), (87, 94), (87, 96), (87, 97), (87, 98), (87, 100), (87, 109), (87, 111), (87, 113), (88, 86), (88, 88), (88, 90), (88, 94), (88, 96), (88, 97), (88, 99), (88, 109), (88, 112), (89, 84), (89, 89), (89, 95), (89, 98), (89, 109),
(89, 112), (90, 82), (90, 85), (90, 86), (90, 88), (90, 95), (90, 97), (90, 110), (90, 112), (91, 84), (91, 85), (91, 87), (92, 81), (92, 83), (92, 84), (92, 85), (92, 87), (93, 80), (93, 82), (93, 83), (93, 84), (93, 85), (93, 86), (93, 87), (93, 88), (93, 89), (93, 91), (93, 100), (93, 102), (94, 79), (94, 81), (94, 82), (94, 83), (94, 84), (94, 85), (94, 86), (94, 87), (94, 93), (94, 99), (94, 103), (95, 79), (95, 81), (95, 82), (95, 83), (95, 84), (95, 85), (95, 86), (95, 87), (95, 88), (95, 89), (95, 90), (95, 91), (95, 94), (95, 100), (95, 101), (95, 103), (96, 78), (96, 80), (96, 81), (96, 82), (96, 83), (96, 84), (96, 85), (96, 86), (96, 87), (96, 88), (96, 89), (96, 90), (96, 91), (96, 92), (96, 93), (96, 96),
(96, 99), (96, 100), (96, 101), (96, 102), (96, 104), (97, 78), (97, 80), (97, 81), (97, 82), (97, 83), (97, 84), (97, 85), (97, 86), (97, 87), (97, 88), (97, 89), (97, 90), (97, 91), (97, 92), (97, 93), (97, 94), (97, 95), (97, 97), (97, 98), (97, 99), (97, 103), (98, 78), (98, 80), (98, 81), (98, 82), (98, 83), (98, 84), (98, 85), (98, 86), (98, 92), (98, 93), (98, 94), (98, 95), (98, 96), (98, 97), (98, 101), (98, 102), (99, 79), (99, 81), (99, 82), (99, 83), (99, 84), (99, 87), (99, 88), (99, 89), (99, 90), (99, 91), (99, 98), (99, 99), (100, 80), (100, 82), (100, 85), (100, 86), (100, 92), (100, 93), (100, 94), (100, 95), (100, 96), (101, 80), (101, 82), (101, 84), (102, 79), (102, 82), (103, 77), (103, 80), (103, 82), (104, 79),
(104, 80), (104, 82), (104, 86), (104, 88), (105, 77), (105, 79), (105, 80), (105, 81), (105, 82), (105, 83), (105, 84), (105, 85), (105, 89), (106, 78), (107, 79), (107, 81), (107, 85), (107, 86), (107, 88), (108, 83), (108, 84), )
coordinates_A120F0 = ((122, 138),
(122, 140), (122, 141), (122, 143), (123, 138), (123, 144), (124, 138), (124, 140), (124, 143), (125, 138), (126, 138), (126, 140), (127, 138), (127, 140), (128, 138), (128, 139), (129, 138), (130, 137), )
coordinates_ADFF2F = ((100, 137),
(100, 139), (100, 140), (100, 141), (100, 142), (100, 143), (100, 144), (100, 146), (101, 137), (101, 146), (102, 138), (102, 140), (102, 141), (102, 144), (102, 146), (103, 143), (103, 146), (104, 144), (104, 146), (105, 144), (105, 147), (106, 144), (106, 147), (107, 144), (107, 146), (107, 148), (108, 143), (108, 145), (108, 146), (108, 147), (108, 149), (109, 142), (109, 144), (109, 145), (109, 146), (109, 147), (109, 149), (110, 143), (110, 146), (110, 147), (110, 149), (111, 144), (111, 147), (111, 149), (112, 146), (112, 149), (113, 147), (113, 149), )
coordinates_A020F0 = ((114, 142),
(115, 140), (115, 143), (116, 141), (116, 144), (117, 141), (117, 144), (118, 141), (118, 143), (118, 145), (119, 142), (119, 146), (120, 142), (120, 146), (121, 145), (121, 146), )
coordinates_B03060 = ((111, 142),
(112, 142), (112, 143), (113, 143), (113, 144), (114, 144), (114, 146), (115, 145), (115, 147), (115, 148), (116, 146), (116, 149), (116, 150), (116, 152), (117, 147), (117, 152), (118, 147), (118, 149), (118, 150), (118, 152), (119, 148), (119, 152), (120, 148), (120, 150), (120, 152), (121, 152), )
coordinates_ACD8E6 = ((79, 137),
(80, 136), (80, 138), (81, 135), (81, 137), (81, 139), (82, 134), (82, 136), (82, 137), (83, 133), (83, 136), (83, 137), (83, 138), (83, 140), (84, 133), (84, 135), (84, 136), (84, 137), (84, 138), (84, 139), (84, 141), (85, 132), (85, 134), (85, 138), (85, 139), (85, 141), (86, 132), (86, 133), (86, 138), (86, 139), (86, 140), (86, 142), (87, 131), (87, 134), (87, 138), (87, 140), (87, 142), (88, 130), (88, 133), (88, 138), (88, 140), (88, 141), (88, 143), (89, 129), (89, 132), (89, 138), (89, 143), (90, 129), (90, 131), (90, 139), (90, 141), (90, 142), (91, 128), (91, 131), (92, 130), (93, 127), (93, 130), (94, 127), (94, 130), (95, 127), (95, 129), (96, 127), (96, 128), )
coordinates_FF3E96 = ((109, 102),
(109, 104), (109, 105), (109, 106), (109, 108), (110, 100), (110, 109), (111, 99), (111, 102), (111, 103), (111, 104), (111, 105), (111, 106), (111, 107), (111, 109), (112, 98), (112, 100), (112, 101), (112, 102), (112, 103), (112, 104), (112, 105), (112, 106), (112, 107), (112, 109), (113, 98), (113, 100), (113, 101), (113, 102), (113, 103), (113, 104), (113, 105), (113, 106), (113, 107), (113, 109), (114, 98), (114, 100), (114, 101), (114, 102), (114, 103), (114, 104), (114, 105), (114, 106), (114, 108), (114, 109), (114, 132), (114, 134), (115, 97), (115, 99), (115, 100), (115, 101), (115, 102), (115, 103), (115, 104), (115, 105), (115, 106), (115, 108), (115, 109), (115, 131), (115, 135), (116, 93), (116, 97), (116, 99), (116, 100), (116, 101), (116, 102), (116, 103), (116, 104), (116, 105), (116, 106), (116, 107), (116, 109), (116, 131),
(116, 133), (116, 134), (116, 136), (117, 93), (117, 94), (117, 97), (117, 99), (117, 100), (117, 101), (117, 102), (117, 103), (117, 104), (117, 105), (117, 106), (117, 107), (117, 109), (117, 113), (117, 114), (117, 115), (117, 117), (117, 130), (117, 132), (117, 133), (117, 134), (117, 135), (117, 137), (117, 139), (118, 93), (118, 96), (118, 97), (118, 104), (118, 105), (118, 106), (118, 107), (118, 108), (118, 109), (118, 111), (118, 112), (118, 113), (118, 114), (118, 118), (118, 125), (118, 127), (118, 128), (118, 131), (118, 132), (118, 133), (118, 135), (118, 136), (118, 139), (119, 93), (119, 98), (119, 99), (119, 100), (119, 101), (119, 102), (119, 103), (119, 112), (119, 116), (119, 118), (119, 124), (119, 134), (120, 93), (120, 95), (120, 96), (120, 104), (120, 105), (120, 106), (120, 107), (120, 108), (120, 110), (120, 117),
(120, 118), (120, 124), (120, 126), (120, 127), (120, 128), (120, 129), (120, 130), (120, 131), (120, 132), (120, 133), (120, 136), (120, 137), (120, 138), (120, 140), (121, 110), )
coordinates_7EFFD4 = ((153, 123),
(153, 125), (154, 124), (154, 126), (155, 125), (155, 127), (156, 126), (156, 128), (157, 126), (157, 129), (158, 126), (158, 129), (159, 127), (159, 130), (160, 127), (160, 130), (161, 127), (161, 130), (162, 127), (162, 130), (163, 127), (163, 130), (164, 127), (164, 128), (164, 130), (165, 128), (165, 130), (165, 134), (166, 128), (166, 130), (166, 133), (166, 135), (167, 128), (167, 130), (167, 131), (167, 132), (167, 134), (167, 136), (168, 128), (168, 129), (168, 130), (168, 133), (168, 135), (169, 129), (169, 131), (169, 134), (170, 129), (170, 133), (171, 128), (171, 131), (172, 128), (172, 130), )
coordinates_B12222 = ((149, 127),
(150, 127), (151, 130), (152, 127), (152, 131), (153, 128), (153, 131), (154, 129), (154, 132), (155, 130), (155, 133), (156, 130), (156, 134), (157, 131), (157, 135), (158, 133), (158, 136), (159, 134), (159, 137), (160, 134), (160, 136), (160, 139), (160, 140), (161, 134), (161, 136), (161, 137), (161, 139), (162, 134), (162, 136), (162, 137), (162, 139), (163, 134), (163, 138), (164, 135), (164, 138), (165, 137), )
coordinates_7FFFD4 = ((71, 124),
(71, 126), (72, 110), (72, 112), (72, 113), (72, 114), (72, 120), (72, 122), (72, 127), (72, 128), (73, 109), (73, 116), (73, 117), (73, 118), (73, 119), (73, 124), (73, 125), (73, 126), (73, 128), (74, 109), (74, 111), (74, 112), (74, 113), (74, 114), (74, 115), (74, 121), (74, 122), (74, 123), (74, 124), (74, 125), (74, 127), (75, 110), (75, 112), (75, 113), (75, 114), (75, 115), (75, 116), (75, 117), (75, 119), (75, 124), (75, 126), (76, 111), (76, 113), (76, 114), (76, 115), (76, 116), (76, 117), (76, 119), (76, 124), (76, 126), (77, 111), (77, 113), (77, 114), (77, 115), (77, 116), (77, 118), (77, 124), (77, 125), (78, 112), (78, 114), (78, 115), (78, 116), (78, 118), (78, 125), (79, 112), (79, 114), (79, 115), (79, 116), (79, 117), (79, 118), (79, 125), (80, 112), (80, 114),
(80, 115), (80, 117), (81, 112), (81, 114), (81, 115), (81, 117), (82, 113), (82, 115), (82, 117), (83, 114), (83, 117), (84, 115), (84, 117), (85, 116), (86, 116), (87, 115), (87, 116), (88, 115), (88, 116), (89, 114), (89, 115), (90, 114), )
coordinates_B22222 = ((74, 129),
(75, 132), (76, 128), (76, 130), (76, 133), (77, 128), (77, 130), (77, 131), (77, 132), (78, 127), (78, 131), (78, 132), (78, 133), (78, 135), (79, 127), (79, 129), (79, 130), (79, 131), (79, 132), (79, 135), (80, 131), (80, 134), (81, 131), (81, 133), (82, 131), (82, 132), (83, 130), (83, 131), (84, 129), (85, 128), (85, 130), (86, 128), (86, 129), (87, 127), (88, 128), (89, 126), (89, 127), (90, 125), (90, 126), (91, 126), (92, 124), (92, 125), (93, 123), (93, 125), (94, 123), (94, 125), (95, 125), (96, 124), )
coordinates_499B3C = ((144, 144),
(144, 145), (145, 136), (145, 141), (145, 142), (145, 143), (145, 146), (146, 136), (146, 140), (146, 143), (146, 144), (146, 146), (147, 137), (147, 139), (147, 143), (147, 144), (147, 146), (148, 130), (148, 132), (148, 142), (148, 143), (148, 144), (148, 146), (149, 133), (149, 141), (149, 143), (149, 145), (150, 131), (150, 134), (150, 141), (150, 143), (150, 145), (151, 132), (151, 135), (151, 141), (151, 144), (152, 133), (152, 136), (152, 141), (152, 144), (153, 134), (153, 137), (153, 140), (153, 143), (154, 135), (154, 137), (154, 140), (154, 141), (154, 143), (155, 136), (155, 140), (155, 142), (156, 137), (156, 139), (156, 140), (156, 142), (157, 138), (157, 141), (158, 139), (158, 141), )
coordinates_633263 = ((154, 114),
(155, 115), (155, 123), (156, 115), (156, 123), (157, 115), (157, 123), (157, 124), (158, 115), (158, 116), (158, 123), (158, 124), (159, 115), (159, 116), (159, 123), (159, 124), (160, 115), (160, 123), (161, 115), (161, 117), (161, 123), (161, 125), (162, 116), (162, 117), (162, 122), (162, 125), (163, 115), (163, 116), (163, 122), (163, 125), (164, 115), (164, 116), (164, 122), (164, 125), (165, 115), (165, 116), (165, 122), (165, 125), (166, 115), (166, 116), (166, 122), (166, 124), (166, 126), (167, 115), (167, 122), (167, 124), (167, 126), (168, 115), (168, 122), (168, 124), (168, 126), (169, 115), (169, 122), (169, 124), (169, 126), (170, 114), (170, 115), (170, 121), (170, 122), (170, 123), (170, 124), (170, 126), (171, 115), (171, 121), (171, 123), (171, 124), (171, 126), (172, 113), (172, 115), (172, 116), (172, 117), (172, 118), (172, 119),
(172, 122), (172, 123), (173, 113), (173, 115), (173, 121), (173, 124), (174, 113), (174, 121), (174, 122), (174, 124), (175, 115), (175, 116), (175, 117), (175, 118), (175, 119), (175, 120), )
coordinates_4A9B3C = ((92, 140),
(92, 142), (92, 143), (92, 145), (93, 140), (93, 145), (94, 141), (94, 143), (94, 145), (95, 142), (95, 145), (96, 142), (96, 145), (97, 139), (97, 145), (98, 138), (98, 140), (98, 141), (98, 142), (98, 143), (98, 145), (98, 146), )
coordinates_218B22 = ((150, 156),
(150, 158), (151, 155), (151, 159), (152, 154), (152, 156), (152, 157), (152, 159), (153, 152), (153, 155), (153, 156), (153, 157), (153, 158), (153, 160), (154, 150), (154, 154), (154, 155), (154, 156), (154, 160), (155, 149), (155, 152), (155, 153), (155, 154), (155, 155), (155, 159), (156, 148), (156, 150), (156, 151), (156, 152), (156, 153), (156, 154), (156, 155), (156, 156), (157, 148), (157, 150), (157, 151), (157, 152), (157, 153), (157, 155), (158, 148), (158, 150), (158, 151), (158, 152), (158, 153), (158, 155), (159, 147), (159, 149), (159, 150), (159, 151), (159, 152), (159, 153), (159, 154), (159, 155), (159, 156), (160, 147), (160, 149), (160, 150), (160, 155), (160, 158), (161, 147), (161, 151), (161, 152), (161, 153), (161, 154), (161, 158), (162, 147), (162, 149), (162, 150), (162, 155), (162, 157), )
coordinates_228B22 = ((78, 147),
(78, 148), (79, 147), (79, 149), (80, 147), (80, 150), (81, 147), (81, 148), (81, 149), (81, 151), (82, 148), (82, 151), (83, 148), (83, 150), (83, 152), (84, 148), (84, 150), (84, 152), (85, 149), (85, 151), (85, 153), (86, 149), (86, 151), (86, 153), (87, 149), (87, 151), (87, 153), (88, 150), (88, 152), (88, 154), (89, 150), (89, 152), (89, 154), (90, 151), (90, 154), (91, 151), (91, 154), (92, 152), (92, 155), (93, 153), (93, 156), (94, 154), (94, 158), (95, 155), (95, 158), )
| 470.988889 | 865 | 0.482177 |
eb7c3fb12e03f2b24dcc584553fc30f0b1f73b73 | 2,253 | py | Python | examples/Redfish/expand_data.py | andreaslangnevyjel/python-ilorest-library | cd40e5ed9dfd615074d34ec6bb929dc8ea04a797 | [
"Apache-2.0"
] | 214 | 2016-04-04T12:24:52.000Z | 2022-03-28T11:35:46.000Z | examples/Redfish/expand_data.py | andreaslangnevyjel/python-ilorest-library | cd40e5ed9dfd615074d34ec6bb929dc8ea04a797 | [
"Apache-2.0"
] | 139 | 2016-04-02T04:22:29.000Z | 2022-03-25T06:54:45.000Z | examples/Redfish/expand_data.py | andreaslangnevyjel/python-ilorest-library | cd40e5ed9dfd615074d34ec6bb929dc8ea04a797 | [
"Apache-2.0"
] | 116 | 2016-04-04T20:39:42.000Z | 2021-11-13T06:53:41.000Z |
# Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
"""
An example of expanding data responses
"""
import sys
import json
from redfish import RedfishClient
from redfish.rest.v1 import ServerDownOrUnreachableError
def expand_data(_redfishobj, expand_url="/redfish/v1/"):
response = _redfishobj.get(expand_url)
exp_response = _redfishobj.get(expand_url+'?$expand=.')
sys.stdout.write('Standard response:\n')
sys.stdout.write('\t'+str(response.dict)+'\n')
sys.stdout.write('Expanded response:\n')
sys.stdout.write('\t'+str(exp_response.dict)+'\n')
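# Added note: the '?$expand=.' query above asks the Redfish service to return the
# referenced resources inline instead of bare @odata.id links. The helper below is
# an illustrative sketch only (not part of python-ilorest-library); the Redfish
# $expand syntax also accepts a $levels bound, e.g. '?$expand=.($levels=1)'.
def build_expand_query(url, expand_type=".", levels=None):
    """Return `url` with a Redfish $expand query string appended (sketch)."""
    query = "?$expand=" + expand_type
    if levels is not None:
        # Bound the expansion depth, e.g. "/redfish/v1/systems/?$expand=.($levels=1)"
        query = query + "($levels=%d)" % levels
    return url + query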
if __name__ == "__main__":
# When running on the server locally use the following commented values
#SYSTEM_URL = None
#LOGIN_ACCOUNT = None
#LOGIN_PASSWORD = None
    # When running remotely, connect using the secured (https://) address,
# account name, and password to send https requests
# SYSTEM_URL acceptable examples:
# "https://10.0.0.100"
# "https://ilo.hostname"
SYSTEM_URL = "https://10.0.0.100"
LOGIN_ACCOUNT = "admin"
LOGIN_PASSWORD = "password"
#url to be expanded
EXPAND_URL = "/redfish/v1/systems/"
try:
# Create a Redfish client object
REDFISHOBJ = RedfishClient(base_url=SYSTEM_URL, username=LOGIN_ACCOUNT, \
password=LOGIN_PASSWORD)
# Login with the Redfish client
REDFISHOBJ.login()
except ServerDownOrUnreachableError as excp:
sys.stderr.write("ERROR: server not reachable or does not support RedFish.\n")
sys.exit()
expand_data(REDFISHOBJ, EXPAND_URL)
REDFISHOBJ.logout()
| 35.203125 | 100 | 0.684421 |
6989358b5828b06e1b53569a06aa7612e515fb30 | 26,624 | py | Python | tensorflow_probability/python/math/linalg_test.py | timudk/probability | 8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/math/linalg_test.py | timudk/probability | 8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/math/linalg_test.py | timudk/probability | 8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import hypothesis as hp
from hypothesis import strategies as hps
from hypothesis.extra import numpy as hpnp
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
class _PinvTest(object):
def expected_pinv(self, a, rcond):
"""Calls `np.linalg.pinv` but corrects its broken batch semantics."""
if a.ndim < 3:
return np.linalg.pinv(a, rcond)
if rcond is None:
rcond = 10. * max(a.shape[-2], a.shape[-1]) * np.finfo(a.dtype).eps
s = np.concatenate([a.shape[:-2], [a.shape[-1], a.shape[-2]]])
a_pinv = np.zeros(s, dtype=a.dtype)
for i in np.ndindex(a.shape[:(a.ndim - 2)]):
a_pinv[i] = np.linalg.pinv(
a[i],
rcond=rcond if isinstance(rcond, float) else rcond[i])
return a_pinv
def test_symmetric(self):
a_ = self.dtype([[1., .4, .5],
[.4, .2, .25],
[.5, .25, .35]])
a_ = np.stack([a_ + 1., a_], axis=0) # Batch of matrices.
a = tf1.placeholder_with_default(
a_, shape=a_.shape if self.use_static_shape else None)
if self.use_default_rcond:
rcond = None
else:
rcond = self.dtype([0., 0.01]) # Smallest 1 component is forced to zero.
expected_a_pinv_ = self.expected_pinv(a_, rcond)
a_pinv = tfp.math.pinv(a, rcond, validate_args=True)
a_pinv_ = self.evaluate(a_pinv)
self.assertAllClose(expected_a_pinv_, a_pinv_,
atol=1e-5, rtol=1e-5)
if not self.use_static_shape:
return
self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape)
def test_nonsquare(self):
a_ = self.dtype([[1., .4, .5, 1.],
[.4, .2, .25, 2.],
[.5, .25, .35, 3.]])
a_ = np.stack([a_ + 0.5, a_], axis=0) # Batch of matrices.
a = tf1.placeholder_with_default(
a_, shape=a_.shape if self.use_static_shape else None)
if self.use_default_rcond:
rcond = None
else:
# Smallest 2 components are forced to zero.
rcond = self.dtype([0., 0.25])
expected_a_pinv_ = self.expected_pinv(a_, rcond)
a_pinv = tfp.math.pinv(a, rcond, validate_args=True)
a_pinv_ = self.evaluate(a_pinv)
self.assertAllClose(expected_a_pinv_, a_pinv_,
atol=1e-5, rtol=1e-4)
if not self.use_static_shape:
return
self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape)
@test_util.run_all_in_graph_and_eager_modes
class PinvTestDynamic32DefaultRcond(tf.test.TestCase, _PinvTest):
dtype = np.float32
use_static_shape = False
use_default_rcond = True
@test_util.run_all_in_graph_and_eager_modes
class PinvTestStatic64DefaultRcond(tf.test.TestCase, _PinvTest):
dtype = np.float64
use_static_shape = True
use_default_rcond = True
@test_util.run_all_in_graph_and_eager_modes
class PinvTestDynamic32CustomtRcond(tf.test.TestCase, _PinvTest):
dtype = np.float32
use_static_shape = False
use_default_rcond = False
@test_util.run_all_in_graph_and_eager_modes
class PinvTestStatic64CustomRcond(tf.test.TestCase, _PinvTest):
dtype = np.float64
use_static_shape = True
use_default_rcond = False
class _CholeskyExtend(tf.test.TestCase):
def testCholeskyExtension(self):
xs = np.random.random(7).astype(self.dtype)[:, tf.newaxis]
xs = tf1.placeholder_with_default(
xs, shape=xs.shape if self.use_static_shape else None)
k = tfp.positive_semidefinite_kernels.MaternOneHalf()
mat = k.matrix(xs, xs)
chol = tf.linalg.cholesky(mat)
ys = np.random.random(3).astype(self.dtype)[:, tf.newaxis]
ys = tf1.placeholder_with_default(
ys, shape=ys.shape if self.use_static_shape else None)
xsys = tf.concat([xs, ys], 0)
new_chol_expected = tf.linalg.cholesky(k.matrix(xsys, xsys))
new_chol = tfp.math.cholesky_concat(chol, k.matrix(xsys, ys))
self.assertAllClose(new_chol_expected, new_chol)
@hp.given(hps.data())
@hp.settings(deadline=None, max_examples=10,
derandomize=tfp_test_util.derandomize_hypothesis())
def testCholeskyExtensionRandomized(self, data):
jitter = lambda n: tf.linalg.eye(n, dtype=self.dtype) * 1e-5
target_bs = data.draw(hpnp.array_shapes())
prev_bs, new_bs = data.draw(tfp_test_util.broadcasting_shapes(target_bs, 2))
ones = tf.TensorShape([1] * len(target_bs))
smallest_shared_shp = tuple(np.min(
[tf.broadcast_static_shape(ones, shp).as_list()
for shp in [prev_bs, new_bs]],
axis=0))
z = data.draw(hps.integers(min_value=1, max_value=12))
n = data.draw(hps.integers(min_value=0, max_value=z - 1))
m = z - n
np.random.seed(data.draw(hps.integers(min_value=0, max_value=2**32 - 1)))
xs = np.random.uniform(size=smallest_shared_shp + (n,))
data.draw(hps.just(xs))
xs = (xs + np.zeros(prev_bs.as_list() + [n]))[..., np.newaxis]
xs = xs.astype(self.dtype)
xs = tf1.placeholder_with_default(
xs, shape=xs.shape if self.use_static_shape else None)
k = tfp.positive_semidefinite_kernels.MaternOneHalf()
mat = k.matrix(xs, xs) + jitter(n)
chol = tf.linalg.cholesky(mat)
ys = np.random.uniform(size=smallest_shared_shp + (m,))
data.draw(hps.just(ys))
ys = (ys + np.zeros(new_bs.as_list() + [m]))[..., np.newaxis]
ys = ys.astype(self.dtype)
ys = tf1.placeholder_with_default(
ys, shape=ys.shape if self.use_static_shape else None)
xsys = tf.concat([xs + tf.zeros(target_bs + (n, 1), dtype=self.dtype),
ys + tf.zeros(target_bs + (m, 1), dtype=self.dtype)],
axis=-2)
new_chol_expected = tf.linalg.cholesky(k.matrix(xsys, xsys) + jitter(z))
new_chol = tfp.math.cholesky_concat(
chol, k.matrix(xsys, ys) + jitter(z)[:, n:])
self.assertAllClose(new_chol_expected, new_chol, rtol=1e-5, atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class CholeskyExtend32Static(_CholeskyExtend):
dtype = np.float32
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class CholeskyExtend64Dynamic(_CholeskyExtend):
dtype = np.float64
use_static_shape = False
del _CholeskyExtend
class _PivotedCholesky(tf.test.TestCase, parameterized.TestCase):
def _random_batch_psd(self, dim):
matrix = np.random.random([2, dim, dim])
matrix = np.matmul(matrix, np.swapaxes(matrix, -2, -1))
matrix = (matrix + np.diag(np.arange(dim) * .1)).astype(self.dtype)
masked_shape = (
matrix.shape if self.use_static_shape else [None] * len(matrix.shape))
matrix = tf1.placeholder_with_default(matrix, shape=masked_shape)
return matrix
def testPivotedCholesky(self):
dim = 11
matrix = self._random_batch_psd(dim)
true_diag = tf.linalg.diag_part(matrix)
pchol = tfp.math.pivoted_cholesky(matrix, max_rank=1)
mat = tf.matmul(pchol, pchol, transpose_b=True)
diag_diff_prev = self.evaluate(tf.abs(tf.linalg.diag_part(mat) - true_diag))
diff_norm_prev = self.evaluate(
tf.linalg.norm(tensor=mat - matrix, ord='fro', axis=[-1, -2]))
for rank in range(2, dim + 1):
# Specifying diag_rtol forces the full max_rank decomposition.
pchol = tfp.math.pivoted_cholesky(matrix, max_rank=rank, diag_rtol=-1)
zeros_per_col = dim - tf.math.count_nonzero(pchol, axis=-2)
mat = tf.matmul(pchol, pchol, transpose_b=True)
pchol_shp, diag_diff, diff_norm, zeros_per_col = self.evaluate([
tf.shape(pchol),
tf.abs(tf.linalg.diag_part(mat) - true_diag),
tf.linalg.norm(tensor=mat - matrix, ord='fro', axis=[-1, -2]),
zeros_per_col
])
self.assertAllEqual([2, dim, rank], pchol_shp)
self.assertAllEqual(
np.ones([2, rank], dtype=np.bool), zeros_per_col >= np.arange(rank))
self.assertAllLessEqual(diag_diff - diag_diff_prev,
np.finfo(self.dtype).resolution)
self.assertAllLessEqual(diff_norm - diff_norm_prev,
np.finfo(self.dtype).resolution)
diag_diff_prev, diff_norm_prev = diag_diff, diff_norm
def testGradient(self):
dim = 11
matrix = self._random_batch_psd(dim)
_, dmatrix = tfp.math.value_and_gradient(
lambda matrix: tfp.math.pivoted_cholesky(matrix, max_rank=dim // 3),
matrix)
self.assertIsNotNone(dmatrix)
self.assertAllGreater(
tf.linalg.norm(tensor=dmatrix, ord='fro', axis=[-1, -2]), 0.)
@test_util.enable_control_flow_v2
def testGradientTapeCFv2(self):
dim = 11
matrix = self._random_batch_psd(dim)
with tf.GradientTape() as tape:
tape.watch(matrix)
pchol = tfp.math.pivoted_cholesky(matrix, max_rank=dim // 3)
dmatrix = tape.gradient(
pchol, matrix, output_gradients=tf.ones_like(pchol) * .01)
self.assertIsNotNone(dmatrix)
self.assertAllGreater(
tf.linalg.norm(tensor=dmatrix, ord='fro', axis=[-1, -2]), 0.)
# pyformat: disable
@parameterized.parameters(
# Inputs are randomly shuffled arange->tril; outputs from gpytorch.
(
np.array([
[7., 0, 0, 0, 0, 0],
[9, 13, 0, 0, 0, 0],
[4, 10, 6, 0, 0, 0],
[18, 1, 2, 14, 0, 0],
[5, 11, 20, 3, 17, 0],
[19, 12, 16, 15, 8, 21]
]),
np.array([
[3.4444, -1.3545, 4.084, 1.7674, -1.1789, 3.7562],
[8.4685, 1.2821, 3.1179, 12.9197, 0.0000, 0.0000],
[7.5621, 4.8603, 0.0634, 7.3942, 4.0637, 0.0000],
[15.435, -4.8864, 16.2137, 0.0000, 0.0000, 0.0000],
[18.8535, 22.103, 0.0000, 0.0000, 0.0000, 0.0000],
[38.6135, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]
])),
(
np.array([
[1, 0, 0],
[2, 3, 0],
[4, 5, 6.]
]),
np.array([
[0.4558, 0.3252, 0.8285],
[2.6211, 2.4759, 0.0000],
[8.7750, 0.0000, 0.0000]
])),
(
np.array([
[6, 0, 0],
[3, 2, 0],
[4, 1, 5.]
]),
np.array([
[3.7033, 4.7208, 0.0000],
[2.1602, 2.1183, 1.9612],
[6.4807, 0.0000, 0.0000]
])))
# pyformat: enable
def testOracleExamples(self, mat, oracle_pchol):
mat = np.matmul(mat, mat.T)
for rank in range(1, mat.shape[-1] + 1):
self.assertAllClose(
oracle_pchol[..., :rank],
tfp.math.pivoted_cholesky(mat, max_rank=rank, diag_rtol=-1),
atol=1e-4)
@test_util.run_all_in_graph_and_eager_modes
class PivotedCholesky32Static(_PivotedCholesky):
dtype = np.float32
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class PivotedCholesky64Dynamic(_PivotedCholesky):
dtype = np.float64
use_static_shape = False
del _PivotedCholesky
def make_tensor_hiding_attributes(value, hide_shape, hide_value=True):
if not hide_value:
return tf.convert_to_tensor(value=value)
shape = None if hide_shape else getattr(value, 'shape', None)
return tf1.placeholder_with_default(value, shape=shape)
class _LUReconstruct(object):
dtype = np.float32
use_static_shape = True
def test_non_batch(self):
x_ = np.array(
[[3, 4], [1, 2]],
dtype=self.dtype)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = tfp.math.lu_reconstruct(*tf.linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(x_, y_, atol=0., rtol=1e-3)
def test_batch(self):
x_ = np.array(
[
[[3, 4], [1, 2]],
[[7, 8], [3, 4]],
],
dtype=self.dtype)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = tfp.math.lu_reconstruct(*tf.linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(x_, y_, atol=0., rtol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class LUReconstructStatic(tf.test.TestCase, _LUReconstruct):
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class LUReconstructDynamic(tf.test.TestCase, _LUReconstruct):
use_static_shape = False
class _LUMatrixInverse(object):
dtype = np.float32
use_static_shape = True
def test_non_batch(self):
x_ = np.array([[1, 2], [3, 4]], dtype=self.dtype)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3)
def test_batch(self):
x_ = np.array(
[
[[1, 2],
[3, 4]],
[[7, 8],
[3, 4]],
[[0.25, 0.5],
[0.75, -2.]],
],
dtype=self.dtype)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class LUMatrixInverseStatic(tf.test.TestCase, _LUMatrixInverse):
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class LUMatrixInverseDynamic(tf.test.TestCase, _LUMatrixInverse):
use_static_shape = False
class _LUSolve(object):
dtype = np.float32
use_static_shape = True
def test_non_batch(self):
x_ = np.array(
[[1, 2],
[3, 4]],
dtype=self.dtype)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
rhs_ = np.array([[1, 1]], dtype=self.dtype).T
rhs = tf1.placeholder_with_default(
rhs_, shape=rhs_.shape if self.use_static_shape else None)
lower_upper, perm = tf.linalg.lu(x)
y = tfp.math.lu_solve(lower_upper, perm, rhs, validate_args=True)
y_, perm_ = self.evaluate([y, perm])
self.assertAllEqual([1, 0], perm_)
expected_ = np.linalg.solve(x_, rhs_)
if self.use_static_shape:
self.assertAllEqual(expected_.shape, y.shape)
self.assertAllClose(expected_, y_, atol=0., rtol=1e-3)
def test_batch_broadcast(self):
x_ = np.array(
[
[[1, 2],
[3, 4]],
[[7, 8],
[3, 4]],
[[0.25, 0.5],
[0.75, -2.]],
],
dtype=self.dtype)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
rhs_ = np.array([[1, 1]], dtype=self.dtype).T
rhs = tf1.placeholder_with_default(
rhs_, shape=rhs_.shape if self.use_static_shape else None)
lower_upper, perm = tf.linalg.lu(x)
y = tfp.math.lu_solve(lower_upper, perm, rhs, validate_args=True)
y_, perm_ = self.evaluate([y, perm])
self.assertAllEqual([[1, 0],
[0, 1],
[1, 0]], perm_)
expected_ = np.linalg.solve(x_, rhs_[np.newaxis])
if self.use_static_shape:
self.assertAllEqual(expected_.shape, y.shape)
self.assertAllClose(expected_, y_, atol=0., rtol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class LUSolveStatic(tf.test.TestCase, _LUSolve):
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class LUSolveDynamic(tf.test.TestCase, _LUSolve):
use_static_shape = False
class _SparseOrDenseMatmul(object):
dtype = np.float32
use_static_shape = True
use_sparse_tensor = False
def _make_placeholder(self, x):
return tf1.placeholder_with_default(
x, shape=(x.shape if self.use_static_shape else None))
def _make_sparse_placeholder(self, x):
indices_placeholder = self._make_placeholder(x.indices)
values_placeholder = self._make_placeholder(x.values)
if self.use_static_shape:
dense_shape_placeholder = x.dense_shape
else:
dense_shape_placeholder = self._make_placeholder(x.dense_shape)
return tf.SparseTensor(
indices=indices_placeholder,
values=values_placeholder,
dense_shape=dense_shape_placeholder)
def verify_sparse_dense_matmul(self, x_, y_):
if self.use_sparse_tensor:
x = self._make_sparse_placeholder(tfp.math.dense_to_sparse(x_))
else:
x = self._make_placeholder(x_)
y = self._make_placeholder(y_)
z = tfp.math.sparse_or_dense_matmul(x, y)
z_ = self.evaluate(z)
if self.use_static_shape:
batch_shape = x_.shape[:-2]
self.assertAllEqual(z_.shape, batch_shape + (x_.shape[-2], y_.shape[-1]))
self.assertAllClose(z_, np.matmul(x_, y_), atol=0., rtol=1e-3)
def verify_sparse_dense_matvecmul(self, x_, y_):
if self.use_sparse_tensor:
x = self._make_sparse_placeholder(tfp.math.dense_to_sparse(x_))
else:
x = self._make_placeholder(x_)
y = self._make_placeholder(y_)
z = tfp.math.sparse_or_dense_matvecmul(x, y)
z_ = self.evaluate(z)
if self.use_static_shape:
batch_shape = x_.shape[:-2]
self.assertAllEqual(z_.shape, batch_shape + (x_.shape[-2],))
self.assertAllClose(
z_[..., np.newaxis],
np.matmul(x_, y_[..., np.newaxis]),
atol=0.,
rtol=1e-3)
def test_non_batch_matmul(self):
x_ = np.array([[3, 4, 0], [1, 0, 3]], dtype=self.dtype)
y_ = np.array([[1, 0], [9, 0], [3, 1]], dtype=self.dtype)
self.verify_sparse_dense_matmul(x_, y_)
def test_non_batch_matvecmul(self):
x_ = np.array([[3, 0, 5], [0, 2, 3]], dtype=self.dtype)
y_ = np.array([1, 0, 9], dtype=self.dtype)
self.verify_sparse_dense_matvecmul(x_, y_)
def test_batch_matmul(self):
x_ = np.array([
[[3, 4, 0], [1, 0, 3]],
[[6, 0, 0], [0, 0, 0]],
],
dtype=self.dtype)
y_ = np.array([
[[1, 0], [9, 0], [3, 1]],
[[2, 2], [5, 6], [0, 1]],
],
dtype=self.dtype)
self.verify_sparse_dense_matmul(x_, y_)
def test_batch_matvecmul(self):
x_ = np.array([
[[3, 0, 5], [0, 2, 3]],
[[1, 1, 0], [6, 0, 0]],
],
dtype=self.dtype)
y_ = np.array([
[1, 0, 9],
[0, 0, 2],
], dtype=self.dtype)
self.verify_sparse_dense_matvecmul(x_, y_)
@test_util.run_all_in_graph_and_eager_modes
class SparseOrDenseMatmulStatic(tf.test.TestCase, _SparseOrDenseMatmul):
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class SparseOrDenseMatmulDynamic(tf.test.TestCase, _SparseOrDenseMatmul):
use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
class SparseOrDenseMatmulStaticSparse(tf.test.TestCase, _SparseOrDenseMatmul):
use_static_shape = True
use_sparse_tensor = True
@test_util.run_all_in_graph_and_eager_modes
class SparseOrDenseMatmulDynamicSparse(tf.test.TestCase, _SparseOrDenseMatmul):
use_static_shape = False
use_sparse_tensor = True
class _MatrixRankTest(object):
def test_batch_default_tolerance(self):
x_ = np.array([[[2, 3, -2], # = row2+row3
[-1, 1, -2],
[3, 2, 0]],
[[0, 2, 0], # = 2*row2
[0, 1, 0],
[0, 3, 0]], # = 3*row2
[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]],
self.dtype)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
self.assertAllEqual([2, 1, 3], self.evaluate(tfp.math.matrix_rank(x)))
def test_custom_tolerance_broadcasts(self):
q = tf.linalg.qr(tf.random.uniform([3, 3], dtype=self.dtype))[0]
e = tf.constant([0.1, 0.2, 0.3], dtype=self.dtype)
a = tf.linalg.solve(q, tf.transpose(a=e * q), adjoint=True)
self.assertAllEqual([3, 2, 1, 0], self.evaluate(tfp.math.matrix_rank(
a, tol=[[0.09], [0.19], [0.29], [0.31]])))
def test_nonsquare(self):
x_ = np.array([[[2, 3, -2, 2], # = row2+row3
[-1, 1, -2, 4],
[3, 2, 0, -2]],
[[0, 2, 0, 6], # = 2*row2
[0, 1, 0, 3],
[0, 3, 0, 9]]], # = 3*row2
self.dtype)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
self.assertAllEqual([2, 1], self.evaluate(tfp.math.matrix_rank(x)))
@test_util.run_all_in_graph_and_eager_modes
class MatrixRankStatic32Test(tf.test.TestCase, _MatrixRankTest):
dtype = np.float32
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class MatrixRankDynamic64Test(tf.test.TestCase, _MatrixRankTest):
dtype = np.float64
use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
class FillTriangularTest(tf.test.TestCase):
def _fill_triangular(self, x, upper=False):
"""Numpy implementation of `fill_triangular`."""
x = np.asarray(x)
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(x.shape[-1])
n = np.sqrt(0.25 + 2. * m) - 0.5
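    # Added note: this inverts m = n(n+1)/2. For example, m = 6 gives
    # n = sqrt(0.25 + 12) - 0.5 = 3, i.e. a 3x3 triangular matrix; m = 7 yields a
    # non-integer n and is rejected below as an invalid shape.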
if n != np.floor(n):
raise ValueError('Invalid shape.')
n = np.int32(n)
# We can't do: `x[..., -(n**2-m):]` because this doesn't correctly handle
# `m == n == 1`. Hence, we do absolute indexing.
x_tail = x[..., (m - (n * n - m)):]
y = np.concatenate(
[x, x_tail[..., ::-1]] if upper else [x_tail, x[..., ::-1]],
axis=-1)
y = y.reshape(np.concatenate([
np.int32(x.shape[:-1]),
np.int32([n, n]),
], axis=0))
return np.triu(y) if upper else np.tril(y)
def _run_test(self, x_, use_deferred_shape=False, **kwargs):
x_ = np.asarray(x_)
static_shape = None if use_deferred_shape else x_.shape
x_pl = tf1.placeholder_with_default(x_, shape=static_shape)
# Add `zeros_like(x)` such that x's value and gradient are identical. We
# do this so we can ensure each gradient value is mapped to the right
# gradient location. (Not doing this means the gradient wrt `x` is simple
# `ones_like(x)`.)
# Note:
# zeros_like_x_pl == zeros_like(x_pl)
# gradient(zeros_like_x_pl, x_pl) == x_pl - 1
def _zeros_like(x):
return x * tf.stop_gradient(x - 1.) - tf.stop_gradient(x * (x - 1.))
actual, grad_actual = tfp.math.value_and_gradient(
lambda x: tfp.math.fill_triangular( # pylint: disable=g-long-lambda
x + _zeros_like(x), **kwargs),
x_pl)
actual_, grad_actual_ = self.evaluate([actual, grad_actual])
expected = self._fill_triangular(x_, **kwargs)
if use_deferred_shape and not tf.executing_eagerly():
self.assertEqual(None, actual.shape)
else:
self.assertAllEqual(expected.shape, actual.shape)
self.assertAllClose(expected, actual_, rtol=1e-8, atol=1e-9)
self.assertAllClose(x_, grad_actual_, rtol=1e-8, atol=1e-9)
def testCorrectlyMakes1x1TriLower(self):
self._run_test(np.random.randn(3, int(1*2/2)))
def testCorrectlyMakesNoBatchTriLower(self):
self._run_test(np.random.randn(int(4*5/2)))
def testCorrectlyMakesBatchTriLower(self):
self._run_test(np.random.randn(2, 3, int(3*4/2)))
def testCorrectlyMakesBatchTriLowerUnknownShape(self):
self._run_test(np.random.randn(2, 3, int(3*4/2)), use_deferred_shape=True)
def testCorrectlyMakesBatch7x7TriLowerUnknownShape(self):
self._run_test(np.random.randn(2, 3, int(7*8/2)), use_deferred_shape=True)
def testCorrectlyMakesBatch7x7TriLower(self):
self._run_test(np.random.randn(2, 3, int(7*8/2)))
def testCorrectlyMakes1x1TriUpper(self):
self._run_test(np.random.randn(3, int(1*2/2)), upper=True)
def testCorrectlyMakesNoBatchTriUpper(self):
self._run_test(np.random.randn(int(4*5/2)), upper=True)
def testCorrectlyMakesBatchTriUpper(self):
self._run_test(np.random.randn(2, 2, int(3*4/2)), upper=True)
def testCorrectlyMakesBatchTriUpperUnknownShape(self):
self._run_test(np.random.randn(2, 2, int(3*4/2)),
use_deferred_shape=True,
upper=True)
def testCorrectlyMakesBatch7x7TriUpperUnknownShape(self):
self._run_test(np.random.randn(2, 3, int(7*8/2)),
use_deferred_shape=True,
upper=True)
def testCorrectlyMakesBatch7x7TriUpper(self):
self._run_test(np.random.randn(2, 3, int(7*8/2)), upper=True)
@test_util.run_all_in_graph_and_eager_modes
class FillTriangularInverseTest(FillTriangularTest):
def _run_test(self, x_, use_deferred_shape=False, **kwargs):
x_ = np.asarray(x_)
static_shape = None if use_deferred_shape else x_.shape
x_pl = tf1.placeholder_with_default(x_, shape=static_shape)
zeros_like_x_pl = (x_pl * tf.stop_gradient(x_pl - 1.)
- tf.stop_gradient(x_pl * (x_pl - 1.)))
x = x_pl + zeros_like_x_pl
actual = tfp.math.fill_triangular(x, **kwargs)
inverse_actual = tfp.math.fill_triangular_inverse(actual, **kwargs)
inverse_actual_ = self.evaluate(inverse_actual)
if use_deferred_shape and not tf.executing_eagerly():
self.assertEqual(None, inverse_actual.shape)
else:
self.assertAllEqual(x_.shape, inverse_actual.shape)
self.assertAllEqual(x_, inverse_actual_)
if __name__ == '__main__':
tf.test.main()
| 33.872774 | 115 | 0.644231 |
bf628907100ed45f03c6ba2481f962223005d12b | 1,945 | py | Python | tests/functional/python_tests/cli_wallet/tests/009_get_open_orders.py | drov0/hive | 747380ac6d1d621a99c94ccf3fd24bbece754a57 | [
"MIT"
] | 283 | 2020-03-20T02:13:12.000Z | 2022-03-31T22:40:07.000Z | tests/functional/python_tests/cli_wallet/tests/009_get_open_orders.py | drov0/hive | 747380ac6d1d621a99c94ccf3fd24bbece754a57 | [
"MIT"
] | 19 | 2020-03-20T03:09:16.000Z | 2021-08-28T22:35:09.000Z | tests/functional/python_tests/cli_wallet/tests/009_get_open_orders.py | drov0/hive | 747380ac6d1d621a99c94ccf3fd24bbece754a57 | [
"MIT"
] | 94 | 2020-03-20T01:53:05.000Z | 2022-03-04T11:08:23.000Z |
#!/usr/bin/python3
import time
from utils.test_utils import *
from utils.cmd_args import args
from utils.cli_wallet import CliWallet
from utils.logger import log, init_logger
if __name__ == "__main__":
with Test(__file__):
with CliWallet( args ) as wallet:
creator, user = make_user_for_tests(wallet)
result_before = wallet.get_open_orders(user)['result']
assert(len(result_before) == 0)
log.info( "testing buy order :10.000 TESTS for 1000.000 TBD created by user {}".format( user ) )
wallet.create_order(user, "1", "10.000 TESTS", "1000.000 TBD", "false", "9999", "true")
result_sell = wallet.get_open_orders(user)['result']
assert(len(result_sell) == 1)
assert(result_sell[0]['orderid'] == 1)
assert(result_sell[0]['seller'] == user)
assert(result_sell[0]['for_sale'] == 10000)
assert(result_sell[0]['real_price'] == '100.00000000000000000')
assert(result_sell[0]['sell_price']['base'] == '10.000 TESTS')
assert(result_sell[0]['sell_price']['quote'] == '1000.000 TBD')
assert(not result_sell[0]['rewarded'])
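            # Added note: real_price is quoted as quote/base, so selling 10.000 TESTS
            # for 1000.000 TBD gives 100.0 above, while the reverse order below
            # checks 0.01.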
log.info( "testing buy order :10.000 TBD for 1000.000 TESTS created by user {}".format( user ) )
wallet.create_order(user, "2", "10.000 TBD", "1000.000 TESTS", "false", "9999", "true")
result_buy = wallet.get_open_orders(user)['result']
assert(len(result_buy) == 2)
assert(result_buy[1]['orderid'] == 2)
assert(result_buy[1]['seller'] == user)
assert(result_buy[1]['for_sale'] == 10000)
assert(result_buy[1]['real_price'] == '0.01000000000000000')
assert(result_buy[1]['sell_price']['base'] == '10.000 TBD')
assert(result_buy[1]['sell_price']['quote'] == '1000.000 TESTS')
assert(not result_buy[1]['rewarded'])
| 47.439024 | 108 | 0.597943 |
29e2d85ba89fbd087f080618a4c9b26454eeac13 | 5,258 | py | Python | flsim/utils/tests/test_training_time_estimator.py | JohnlNguyen/FLSim | a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb | [
"BSD-3-Clause"
] | 79 | 2021-12-09T18:05:09.000Z | 2022-03-23T20:43:46.000Z | flsim/utils/tests/test_training_time_estimator.py | JohnlNguyen/FLSim | a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb | [
"BSD-3-Clause"
] | 11 | 2021-12-30T17:54:04.000Z | 2022-03-23T17:23:00.000Z | flsim/utils/tests/test_training_time_estimator.py | JohnlNguyen/FLSim | a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb | [
"BSD-3-Clause"
] | 9 | 2021-12-09T19:55:22.000Z | 2022-03-15T00:02:08.000Z |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from flsim.common.pytest_helper import assertEqual, assertAlmostEqual
from flsim.utils.timing.training_duration_distribution import (
PerUserUniformDurationDistribution,
PerUserUniformDurationDistributionConfig,
PerUserHalfNormalDurationDistribution,
PerUserHalfNormalDurationDistributionConfig,
DurationDistributionFromListConfig,
DurationDistributionFromList,
DurationInfo,
)
from flsim.utils.timing.training_time_estimator import (
get_training_time,
AsyncTrainingTimeEstimator,
SyncTrainingTimeEstimator,
)
from omegaconf import OmegaConf
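# Illustrative sketch (added; not part of FLSim): with users_per_round = k, a
# synchronous round lasts as long as its slowest participant, so the total sync
# training time is the sum of per-round maxima. This mirrors the reasoning in the
# docstrings below using plain floats; the real estimators work on duration
# distributions instead.
def _sync_time_from_durations(durations, users_per_round):
    total = 0.0
    for start in range(0, len(durations), users_per_round):
        # A round ends only when its slowest user finishes.
        total += max(durations[start:start + users_per_round])
    return total
# e.g. _sync_time_from_durations([4, 3, 2, 1], 2) == 6, matching test_time_from_list.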
class TestTrainingTimeEstimator:
def test_time_from_list(self) -> None:
"""
Test training time from list
Assuming UPR = 2
        Sync would be the sum of the slowest user's duration in each round
round 1
user_1: duration = 4
user_2: duration = 3
round 2
user_3: duration = 2
user_4: duration = 1
total = 4 + 2 = 6
        Async would be the time at which the last user finishes training, given:
user_1: duration = 4, start_time = 1
user_2: duration = 3, start_time = 1
user_3: duration = 2, start_time = 2
user_4: duration = 1, start_time = 3
users training @ time 1: user 1, user 2
users training @ time 3: user 2, user 3
users training @ time 4: user 3, user 4
users training @ time 5: user 4 finishes training
"""
training_events = [
DurationInfo(duration=4),
DurationInfo(duration=3),
DurationInfo(duration=2),
DurationInfo(duration=1),
]
async_start_times = [1, 1, 2, 3]
sync_training_dist = DurationDistributionFromList(
**OmegaConf.structured(
DurationDistributionFromListConfig(training_events=training_events)
)
)
async_training_dist = DurationDistributionFromList(
**OmegaConf.structured(
DurationDistributionFromListConfig(training_events=training_events)
)
)
num_users = len(training_events)
epochs = 1
users_per_round = 2
sync_estimator = SyncTrainingTimeEstimator(
total_users=len(training_events),
users_per_round=users_per_round,
epochs=epochs,
training_dist=sync_training_dist,
)
async_estimator = AsyncTrainingTimeEstimator(
total_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
training_dist=async_training_dist,
start_times=async_start_times,
)
async_time = async_estimator.training_time()
sync_time = sync_estimator.training_time()
assertEqual(sync_time, 6)
assertEqual(async_time, 5)
def test_uniform_training_time(self) -> None:
"""
Test uniform training time
Sync and Async should have the same training time if
        UPR = 1 and duration_min is close to duration_mean
"""
torch.manual_seed(0)
num_users = 1000
epochs = 1
users_per_round = 1
duration_mean = 1.00
duration_min = 0.99999
training_dist = PerUserUniformDurationDistribution(
**OmegaConf.structured(
PerUserUniformDurationDistributionConfig(
training_duration_mean=duration_mean,
training_duration_min=duration_min,
)
)
)
sync_time, async_time = get_training_time(
num_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
training_dist=training_dist,
)
assertAlmostEqual(sync_time, async_time, delta=1e-3)
def test_per_user_half_normal(self) -> None:
"""
Test half normal training time
Sync and Async should have the following training time
sync_training_time = async_training_time = num_users * duration_min
        if UPR = 1 and duration_std is close to 0
"""
torch.manual_seed(0)
num_users = 1000
epochs = 1
users_per_round = 1
duration_std = 1e-6
duration_min = 1.0
training_dist = PerUserHalfNormalDurationDistribution(
**OmegaConf.structured(
PerUserHalfNormalDurationDistributionConfig(
training_duration_sd=duration_std,
training_duration_min=duration_min,
)
)
)
sync_time, async_time = get_training_time(
num_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
training_dist=training_dist,
)
assertAlmostEqual(sync_time, async_time, delta=1e-3)
assertAlmostEqual(sync_time, num_users * duration_min, delta=1e-3)
assertAlmostEqual(async_time, num_users * duration_min, delta=1e-3)
| 32.257669 | 83 | 0.625333 |
ce94faadb39823e06566cf7c720f348a448bb628 | 1,470 | py | Python | nicos_demo/vsans1/setups/pressure.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_demo/vsans1/setups/pressure.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_demo/vsans1/setups/pressure.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z |
description = 'Vacuum sensors of detector and collimation tube'
group = 'lowlevel'
devices = dict(
det_tube = device('nicos.devices.generic.ManualMove',
description = 'pressure detector tube: Tube',
abslimits = (0, 1000),
fmtstr = '%.4G',
pollinterval = 15,
maxage = 60,
lowlevel = True,
unit = 'mbar',
),
det_nose = device('nicos.devices.generic.ManualMove',
description = 'pressure detector tube: Nose',
abslimits = (0, 1000),
fmtstr = '%.4G',
pollinterval = 15,
maxage = 60,
lowlevel = True,
unit = 'mbar',
),
coll_tube = device('nicos.devices.generic.ManualMove',
description = 'pressure collimation tube: Tube',
abslimits = (0, 1000),
fmtstr = '%.4G',
pollinterval = 15,
maxage = 60,
lowlevel = True,
unit = 'mbar',
),
coll_nose = device('nicos.devices.generic.ManualMove',
description = 'pressure collimation tube: Nose',
abslimits = (0, 1000),
fmtstr = '%.4G',
pollinterval = 15,
maxage = 60,
lowlevel = True,
unit = 'mbar',
),
coll_pump = device('nicos.devices.generic.ManualMove',
description = 'pressure collimation tube: Pump',
abslimits = (0, 1000),
fmtstr = '%.4G',
pollinterval = 15,
maxage = 60,
lowlevel = True,
unit = 'mbar',
),
)
| 28.269231 | 63 | 0.542857 |
0433d8a6fe3adde21da874f20482a09af670d149 | 3,366 | py | Python | neurolang/utils/testing/logic.py | hndgzkn/NeuroLang | a3178d47f80bc0941440d9bb09e06c2f217b9566 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T02:00:22.000Z | 2021-01-07T02:00:22.000Z | neurolang/utils/testing/logic.py | hndgzkn/NeuroLang | a3178d47f80bc0941440d9bb09e06c2f217b9566 | [
"BSD-3-Clause"
] | 207 | 2020-11-04T12:51:10.000Z | 2022-03-30T13:42:26.000Z | neurolang/utils/testing/logic.py | hndgzkn/NeuroLang | a3178d47f80bc0941440d9bb09e06c2f217b9566 | [
"BSD-3-Clause"
] | 6 | 2020-11-04T13:59:35.000Z | 2021-03-19T05:28:10.000Z | """
This module exposes utility functions for tests on logic expressions.
It should not be used for any other purpose than testing.
"""
from ...expression_pattern_matching import add_match
from ...expression_walker import ExpressionWalker
from ...expressions import Definition, Expression
from ...logic import NaryLogicOperator
__all__ = [
"logic_exp_commutative_equal",
]
class LogicCommutativeComparison(Definition):
"""
Comparison between two expressions that uses the commutativity property of
some logic operators such as conjunctions and disjunctions.
Parameters
----------
first : Expression
First expression.
second : Expression
Second expression.
"""
def __init__(self, first, second):
self.first = first
self.second = second
def __repr__(self):
return "Compare\n\t{}\nwith\n\t{}".format(
repr(self.first), repr(self.second)
)
class LogicCommutativeComparator(ExpressionWalker):
"""
Compare logic expressions using the commutativity property of some logic
operators such as conjunctions and disjunctions.
"""
@add_match(
LogicCommutativeComparison(NaryLogicOperator, NaryLogicOperator)
)
def nary_logic_operators(self, comp):
"""
Compare two n-ary logic operators by comparing their two sets of
formulas.
"""
if not isinstance(comp.first, type(comp.second)) or not isinstance(
comp.second, type(comp.first)
):
return False
return self._compare_set_of_formulas(comp.first, comp.second)
@add_match(LogicCommutativeComparison(Expression, Expression))
def expressions(self, comp):
args1 = comp.first.unapply()
args2 = comp.second.unapply()
if len(args1) != len(args2):
return False
for arg1, arg2 in zip(args1, args2):
if not self._args_equal(arg1, arg2):
return False
return True
def _args_equal(self, arg1, arg2):
if isinstance(arg1, Expression) and isinstance(arg2, Expression):
if not self.walk(LogicCommutativeComparison(arg1, arg2)):
return False
elif arg1 != arg2:
return False
return True
def _compare_set_of_formulas(self, first, second):
return all(
any(
self.walk(LogicCommutativeComparison(f1, f2))
for f2 in second.formulas
)
for f1 in first.formulas
)
def logic_exp_commutative_equal(exp1, exp2):
"""
Compare two expressions using the commutativity property of logic
operators.
The two expressions do not need to be purely equal if the order of the
formulas of a commutative logic operator is not the same in the two
expressions.
Apart from commutative logic operators, the comparison between the two
expressions remains the same as the equality comparison.
Parameters
----------
exp1 : Expression
First expression.
exp2 : Expression
Second expression.
"""
if not isinstance(exp1, Expression) or not isinstance(exp2, Expression):
raise ValueError("Can only compare expressions")
return LogicCommutativeComparator().walk(
LogicCommutativeComparison(exp1, exp2)
)
| 28.285714 | 78 | 0.655674 |
15e199b22e341cb7cab56a47709641d697da9e73 | 2,734 | py | Python | tests/test_edn.py | ciena-blueplanet/pydatomic | 6e49d5a4d9716392eaeb8647e1da21eb300d5380 | [
"MIT"
] | 56 | 2015-01-14T16:38:37.000Z | 2022-02-24T10:54:53.000Z | tests/test_edn.py | ciena-blueplanet/pydatomic | 6e49d5a4d9716392eaeb8647e1da21eb300d5380 | [
"MIT"
] | null | null | null | tests/test_edn.py | ciena-blueplanet/pydatomic | 6e49d5a4d9716392eaeb8647e1da21eb300d5380 | [
"MIT"
] | 10 | 2015-01-27T02:53:03.000Z | 2021-12-06T11:30:24.000Z |
# -*- coding: utf-8 -*-
import unittest
from datetime import datetime
from uuid import UUID
from pydatomic import edn
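# Added note (mirrors the cases exercised below): edn.loads maps EDN forms onto
# plain Python values -- keywords stay ':'-prefixed strings, vectors become tuples
# and sets become frozensets, e.g.
#   edn.loads('{:a [1 2 3]}')  ->  {':a': (1, 2, 3)}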
class EdnParseTest(unittest.TestCase):
def test_all_data(self):
data = {
'"helloworld"': "helloworld",
"23": 23,
"23.11": 23.11,
"true": True,
"false": False,
"nil": None,
":hello": ":hello",
r'"string\"ing"': 'string"ing',
'"string\n"': 'string\n',
'[:hello]':(":hello",),
'-10.4':-10.4,
'"你"': u'你',
'\\€': u'€',
"[1 2]": (1, 2),
"#{true \"hello\" 12}": set([True, "hello", 12]),
'#inst "2012-09-10T23:51:55.840-00:00"': datetime(2012, 9, 10, 23, 51, 55, 840000),
"(\\a \\b \\c \\d)": ("a", "b", "c", "d"),
"{:a 1 :b 2 :c 3 :d 4}": {":a":1, ":b":2, ":c":3,":d":4},
"[1 2 3,4]": (1,2,3,4),
"{:a [1 2 3] :b #{23.1 43.1 33.1}}": {":a":(1, 2, 3), ":b":frozenset([23.1, 43.1, 33.1])},
"{:a 1 :b [32 32 43] :c 4}": {":a":1, ":b":(32,32,43), ":c":4},
"\\你": u"你",
'#db/fn{:lang "clojure" :code "(map l)"}': {':lang':u'clojure', ':code':u'(map l)'},
"#_ {[#{}] #{[]}} [23[34][32][4]]": (23, (34,), (32,), (4,)),
'(:graham/stratton true \n , "A string with \\n \\"s" true #uuid "f81d4fae7dec11d0a76500a0c91e6bf6")': (
u':graham/stratton', True, u'A string with \n "s', True, UUID('f81d4fae-7dec-11d0-a765-00a0c91e6bf6')
),
'[\space \\\xE2\x82\xAC [true []] ;true\n[true #inst "2012-09-10T23:39:43.309-00:00" true ""]]': (
' ', u'\u20ac', (True, ()), (True, datetime(2012, 9, 10, 23, 39, 43, 309000), True, '')
),
' {true false nil [true, ()] 6 {#{nil false} {nil \\newline} }}': {
None: (True, ()), True: False, 6: {frozenset([False, None]): {None: '\n'}}
},
'[#{6.22e-18, -3.1415, 1} true #graham #{"pie" "chips"} "work"]': (
frozenset([6.22e-18, -3.1415, 1]), True, u'work'
),
'(\\a .5)': (u'a', 0.5),
'(List #{[123 456 {}] {a 1 b 2 c ({}, [])}})': (
u'List', ((123, 456, {}), {u'a': 1, u'c': ({}, ()), u'b': 2})
),
}
for k, v in data.items():
self.assertEqual(edn.loads(k), v)
def test_malformed_data(self):
        '''Verify that a ValueError exception is raised on malformed data'''
data = ["[1 2 3", "@EE", "[@nil tee]"]
for d in data:
self.assertRaises(ValueError, edn.loads, d)
if __name__ == '__main__':
unittest.main()
| 41.424242 | 117 | 0.41368 |
3ad157ffd25a76d559494e3b24db09b4d1ba2ef8 | 1,032 | py | Python | ophelia/voicerooms/config_options.py | Bunnic/Ophelia | 7a521ca8cef1e067b6e402db16911b554057ce0d | [
"MIT"
] | null | null | null | ophelia/voicerooms/config_options.py | Bunnic/Ophelia | 7a521ca8cef1e067b6e402db16911b554057ce0d | [
"MIT"
] | null | null | null | ophelia/voicerooms/config_options.py | Bunnic/Ophelia | 7a521ca8cef1e067b6e402db16911b554057ce0d | [
"MIT"
] | null | null | null | """
Voicerooms Configuration module.
Contains the options required to set up a voiceroom generator.
"""
from typing import List
from ophelia.output import ConfigItem, disp_str
from ophelia.utils.discord_utils import (
extract_category_config, extract_text_config,
extract_voice_config
)
VOICEROOMS_GENERATOR_CONFIG: List[ConfigItem] = []
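# Added note: each loop below registers one ConfigItem per key -- the key name, the
# prompt string looked up via disp_str, and the extractor used to validate the
# configured Discord object (category, voice channel, or text channel).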
for category in ["voice_category", "text_category"]:
VOICEROOMS_GENERATOR_CONFIG.append(ConfigItem(
category,
disp_str(f"voicerooms_generator_{category}"),
extract_category_config
))
for voice_channel in ["generator_channel", "sample_voice_channel"]:
VOICEROOMS_GENERATOR_CONFIG.append(ConfigItem(
voice_channel,
disp_str(f"voicerooms_generator_{voice_channel}"),
extract_voice_config
))
for text_channel in ["sample_text_channel", "log_channel"]:
VOICEROOMS_GENERATOR_CONFIG.append(ConfigItem(
text_channel,
disp_str(f"voicerooms_generator_{text_channel}"),
extract_text_config
))
| 27.157895 | 67 | 0.745155 |
17d8273c73888cc04c224429611b598d929de315 | 1,262 | py | Python | regression_test_utils/regression_test_utils.py | JivanAmara/test_utils | f077083ebdd8cbcd626ef98994c582cf585fde14 | [
"BSD-3-Clause"
] | null | null | null | regression_test_utils/regression_test_utils.py | JivanAmara/test_utils | f077083ebdd8cbcd626ef98994c582cf585fde14 | [
"BSD-3-Clause"
] | null | null | null | regression_test_utils/regression_test_utils.py | JivanAmara/test_utils | f077083ebdd8cbcd626ef98994c582cf585fde14 | [
"BSD-3-Clause"
] | null | null | null |
'''
Created on Jul 29, 2015
@author: jivan
'''
import jsonpickle, logging
# PythonDecorators/my_decorator.py
class log_test_case(object):
""" @brief: Decorator to log input & output of a method as a jsonpickle'd tuple for easy
test creation.
Format of the tuple is (<method name>, <args (without self)>, <kwargs>, <result>)
@author: Jivan
@since: 2015-07-29
@change: 2015-08-03 by Jivan: Added class_name to initialization & logged output.
"""
def __init__(self, logger, class_name):
self.logger = logger
self.class_name = class_name
def __call__(self, f):
method_name = f.__name__
logger = self.logger
def wrapped_f(*args, **kwargs):
result = f(*args, **kwargs)
if logger.getEffectiveLevel() <= logging.DEBUG:
args_wo_instance = args[1:]
tc = repr(jsonpickle.encode(
(method_name, args_wo_instance, kwargs, result), keys=True
)
)
logger.debug('Decorator TestCase for "{}.{}":\n\t{}'\
.format(self.class_name, method_name, tc))
return result
return wrapped_f
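# Minimal usage sketch (added for illustration; the class and logger names are
# assumptions, not part of this module):
#
#   logger = logging.getLogger(__name__)
#
#   class Calculator(object):
#       @log_test_case(logger, 'Calculator')
#       def add(self, a, b):
#           return a + b
#
# With DEBUG logging enabled, Calculator().add(1, 2) logs the jsonpickle'd tuple
# ('add', (1, 2), {}, 3), ready to paste into a regression test.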
| 35.055556 | 92 | 0.561014 |
095a8c4c739fb420c16da1e1ae8240d1d72e1c59 | 798 | py | Python | Python/Assignments/week3.py | aquib-sh/DSA-C-PY | 0cc9e874d5310762edd7b6c12dee07e351668c17 | [
"CC0-1.0"
] | null | null | null | Python/Assignments/week3.py | aquib-sh/DSA-C-PY | 0cc9e874d5310762edd7b6c12dee07e351668c17 | [
"CC0-1.0"
] | null | null | null | Python/Assignments/week3.py | aquib-sh/DSA-C-PY | 0cc9e874d5310762edd7b6c12dee07e351668c17 | [
"CC0-1.0"
] | null | null | null |
def remdup(li):
length = len(li)
holder = []
if length <= 1:
return li
for i in range(0, length):
if i == length-1:
holder.append(li[i])
else:
if not li[i] in li[(i+1):]:
holder.append(li[i])
return holder
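# Added note: remdup keeps the last occurrence of each duplicate while preserving
# order, e.g. remdup([1, 2, 1, 3]) returns [2, 1, 3].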
def splitsum(l):
pos = 0
neg = 0
for i in range(0, len(l)):
if l[i] < 0:
neg += l[i]**3
else:
pos += l[i]**2
return [pos, neg]
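# Added note: splitsum returns [sum of squares of non-negatives, sum of cubes of
# negatives], e.g. splitsum([1, -2, 3]) returns [10, -8].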
def matrixflip(m, d):
nm = []
if d == 'h':
for elem in m:
nm.append([elem[i] for i in range(len(elem)-1, -1, -1)])
if d == 'v':
for i in range(len(m)-1, -1, -1):
nm.append(m[i])
return nm
| 17.733333 | 68 | 0.384712 |
b0e650d33133e60c097f26b1e8671202dfc39782 | 4,133 | py | Python | api/streamlit_experiments/s3.py | aws-samples/aws-open-data-analytics-notebooks | 680e9689e1b0ceb047960662d220564ae3ecbddb | [
"Apache-2.0"
] | 70 | 2019-05-09T20:02:13.000Z | 2021-04-03T12:09:18.000Z | api/streamlit_experiments/s3.py | aws-samples/cloud-experiments | 680e9689e1b0ceb047960662d220564ae3ecbddb | [
"Apache-2.0"
] | 14 | 2021-05-15T21:14:28.000Z | 2022-03-31T09:09:11.000Z | api/streamlit_experiments/s3.py | aws-samples/aws-open-data-analytics-notebooks | 680e9689e1b0ceb047960662d220564ae3ecbddb | [
"Apache-2.0"
] | 65 | 2019-05-20T00:48:04.000Z | 2021-04-24T02:28:08.000Z | import streamlit as st
import boto3
import botocore
import pandas as pd
import io
s3_client = boto3.client('s3')
s3_resource = boto3.resource('s3')
def search_buckets():
search = st.text_input('Search S3 bucket in your account', '')
response = s3_client.list_buckets()
if search:
buckets_found = 0
for bucket in response['Buckets']:
if search:
if search in bucket["Name"]:
buckets_found = buckets_found + 1
st.write(f'{bucket["Name"]}')
if buckets_found:
st.success(f'Listing existing **{buckets_found}** buckets containing **{search}** string')
else:
st.warning(f'No matching buckets found containing **{search}** string')
else:
st.info('Provide string to search for listing buckets')
def list_bucket_contents():
total_size_gb = 0
total_files = 0
match_size_gb = 0
match_files = 0
bucket = st.text_input('S3 bucket name (public bucket or private to your account)', '')
bucket_resource = s3_resource.Bucket(bucket)
match = st.text_input('(optional) Filter bucket contents with matching string', '')
size_mb = st.text_input('(optional) Match files up to size in MB (0 for all sizes)', '0')
if size_mb:
size_mb = int(size_mb)
else:
size_mb = 0
if bucket:
for key in bucket_resource.objects.all():
key_size_mb = key.size/1024/1024
total_size_gb += key_size_mb
total_files += 1
list_check = False
if not match:
list_check = True
elif match in key.key:
list_check = True
if list_check and not size_mb:
match_files += 1
match_size_gb += key_size_mb
st.write(f'{key.key} ({key_size_mb:3.0f}MB)')
elif list_check and key_size_mb <= size_mb:
match_files += 1
match_size_gb += key_size_mb
st.write(f'{key.key} ({key_size_mb:3.0f}MB)')
if match:
st.info(f'Matched file size is **{match_size_gb/1024:3.1f}GB** with **{match_files}** files')
st.success(f'Bucket **{bucket}** total size is **{total_size_gb/1024:3.1f}GB** with **{total_files}** files')
else:
st.info('Provide bucket name to list contents')
def create_bucket():
bucket = st.text_input('S3 bucket name to create', '')
if bucket:
try:
s3_client.create_bucket(Bucket=bucket)
except botocore.exceptions.ClientError as e:
st.error('Bucket **' + bucket + '** could not be created. ' + e.response['Error']['Message'])
return
st.success('The S3 bucket **' + bucket + '** successfully created or already exists in your account')
else:
st.info('Provide unique bucket name to create')
def s3_select():
bucket = st.text_input('S3 bucket name', '')
csv = st.text_input('CSV File path and name', '')
st.write("Example: `SELECT * FROM s3object s LIMIT 5`")
sql = st.text_area('SQL statement', '')
if bucket and csv and sql:
s3_select_results = s3_client.select_object_content(
Bucket=bucket,
Key=csv,
Expression=sql,
ExpressionType='SQL',
InputSerialization={'CSV': {"FileHeaderInfo": "Use"}},
OutputSerialization={'JSON': {}},
)
for event in s3_select_results['Payload']:
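        # Added note: S3 Select runs the SQL server-side and streams back an event
        # payload; the Records events carry matching rows as JSON lines, and the
        # Stats event reports scanned vs. processed vs. returned bytes.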
if 'Records' in event:
df = pd.read_json(io.StringIO(event['Records']['Payload'].decode('utf-8')), lines=True)
elif 'Stats' in event:
st.write(f"Scanned: {int(event['Stats']['Details']['BytesScanned'])/1024/1024:5.2f}MB")
st.write(f"Processed: {int(event['Stats']['Details']['BytesProcessed'])/1024/1024:5.2f}MB")
st.write(f"Returned: {int(event['Stats']['Details']['BytesReturned'])/1024/1024:5.2f}MB")
st.write(df)
else:
st.info('Provide S3 bucket, CSV file name, and SQL statement') | 39.361905 | 117 | 0.583837 |
0e506a262abbfab83584566410dfe7ec665436a4 | 4,172 | py | Python | tests/unit/bokeh/models/test_mappers.py | tcmetzger/bokeh | 5daff21bfb7e10b69ff9aa2f35eb506777a38264 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/bokeh/models/test_mappers.py | tcmetzger/bokeh | 5daff21bfb7e10b69ff9aa2f35eb506777a38264 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/bokeh/models/test_mappers.py | tcmetzger/bokeh | 5daff21bfb7e10b69ff9aa2f35eb506777a38264 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from _util_models import check_properties_existence
from bokeh.palettes import Spectral6
# Module under test
import bokeh.models.mappers as bmm # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_CategoricalColorMapper:
def test_basic(self) -> None:
mapper = bmm.CategoricalColorMapper()
check_properties_existence(mapper, [
"factors",
"palette",
"start",
"end",
"nan_color"],
)
def test_warning_with_short_palette(self, recwarn) -> None:
bmm.CategoricalColorMapper(factors=["a", "b", "c"], palette=["red", "green"])
assert len(recwarn) == 1
def test_no_warning_with_long_palette(self, recwarn) -> None:
bmm.CategoricalColorMapper(factors=["a", "b", "c"], palette=["red", "green", "orange", "blue"])
assert len(recwarn) == 0
def test_with_pandas_index(self, pd) -> None:
fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
years = ['2015', '2016', '2017']
data = {'2015' : [2, 1, 4, 3, 2, 4],
'2016' : [5, 3, 3, 2, 4, 6],
'2017' : [3, 2, 4, 4, 5, 3]}
df = pd.DataFrame(data, index=fruits)
fruits = df.index
years = df.columns
m = bmm.CategoricalColorMapper(palette=Spectral6, factors=years, start=1, end=2)
assert list(m.factors) == list(years)
assert isinstance(m.factors, pd.Index)
class Test_CategoricalPatternMapper:
def test_basic(self) -> None:
mapper = bmm.CategoricalPatternMapper()
check_properties_existence(mapper, [
"factors",
"patterns",
"start",
"end",
"default_value"],
)
class Test_CategoricalMarkerMapper:
def test_basic(self) -> None:
mapper = bmm.CategoricalMarkerMapper()
check_properties_existence(mapper, [
"factors",
"markers",
"start",
"end",
"default_value"],
)
class Test_LinearColorMapper:
def test_basic(self) -> None:
mapper = bmm.LinearColorMapper()
check_properties_existence(mapper, [
"palette",
"low",
"high",
"low_color",
"high_color",
"nan_color"],
)
class Test_LogColorMapper:
def test_basic(self) -> None:
mapper = bmm.LogColorMapper()
check_properties_existence(mapper, [
"palette",
"low",
"high",
"low_color",
"high_color",
"nan_color"],
)
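# A minimal construction sketch for the mappers exercised above; the ranges and
# colors are arbitrary illustrative values, not fixtures from this test suite.
def _example_linear_mapper():
    return bmm.LinearColorMapper(
        palette=Spectral6, low=0, high=100,
        low_color="grey", high_color="red", nan_color="white",
    )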
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 32.59375 | 103 | 0.394775 |
a4e32ef9c8adc091f8f4325ae63ce3419162c50b | 3,647 | py | Python | genedisco/evaluation/hitratio.py | genedisco/genedisco | 26b7ce93b222fd80e914f2f2236969b356e7f701 | [
"Apache-2.0"
] | 11 | 2022-02-07T13:19:02.000Z | 2022-03-25T03:38:15.000Z | genedisco/evaluation/hitratio.py | genedisco/genedisco | 26b7ce93b222fd80e914f2f2236969b356e7f701 | [
"Apache-2.0"
] | 4 | 2022-02-05T19:12:30.000Z | 2022-03-18T09:12:35.000Z | genedisco/evaluation/hitratio.py | genedisco/genedisco | 26b7ce93b222fd80e914f2f2236969b356e7f701 | [
"Apache-2.0"
] | 6 | 2022-02-07T16:14:54.000Z | 2022-03-18T22:26:31.000Z | """
Copyright (C) 2022 Arash Mehrjou, GlaxoSmithKline plc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
import pickle
import numpy as np
from typing import AnyStr
from slingpy.evaluation.metrics.abstract_metric import AbstractMetric
class HitRatio(AbstractMetric):
"""
A metric to measure the ratio of the top mover genes selected by the acquisition function.
"""
def get_abbreviation(self) -> AnyStr:
return "HR"
@staticmethod
    def evaluate(top_movers_filepath: AnyStr, super_dir_to_cycle_dirs: AnyStr) -> float:
with open(top_movers_filepath, "rb") as f:
top_mover_indices = pickle.load(f)
top_mover_set = set(top_mover_indices)
num_top_hits = len(top_mover_indices)
num_AL_cycles = get_num_AL_cycles(super_dir_to_cycle_dirs)
selected_indices_per_cycle = get_cumulative_selected_indices(
super_dir_to_cycle_dirs)
cumulative_top_hit_ratio = []
for c in range(num_AL_cycles):
selected_indices = selected_indices_per_cycle[c]
num_of_hits = num_top_hits - len(top_mover_set - set(selected_indices))
cumulative_top_hit_ratio.append(num_of_hits/num_top_hits)
return cumulative_top_hit_ratio[-1] # returns the top hit ratio of the current cycle
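# Worked example of the hit counting in `evaluate` above (illustrative values):
# with top_mover_set = {"g1", "g2", "g3"} and selected_indices = ["g2", "g4"],
# top_mover_set - set(selected_indices) == {"g1", "g3"}, so num_of_hits = 3 - 2 = 1
# and the hit ratio recorded for that cycle is 1/3.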
def get_cumulative_selected_indices(super_dir_to_cycle_dirs: AnyStr):
""" Get a list of selected indiced at cycles of active learning.
Args:
super_dir_to_cycle_dirs: The dir in which the cycle dirs are saved.
seed: The seed of the experiment.
Return a concatenated list of the saved selected indices so far.
"""
num_AL_cycles = get_num_AL_cycles(super_dir_to_cycle_dirs)
selected_indices_per_cycles = []
for c in range(num_AL_cycles):
filename = os.path.join(super_dir_to_cycle_dirs, "cycle_" + str(c), "selected_indices.pickle")
with open(filename, "rb") as f:
selected_indices = pickle.load(f)
# selected_indices = [x.decode("utf-8") for x in selected_indices] # Uncomment this line if the stored Gene names are byte strings.
selected_indices_per_cycles.append(selected_indices)
return selected_indices_per_cycles
def get_num_AL_cycles(super_dir_to_cycle_dirs: AnyStr):
"""Get the number of cycles stored in the provided dir.
"""
all_subdirs = list(os.walk(super_dir_to_cycle_dirs))[0][1]
cycle_subdirs = [folder_name for folder_name in all_subdirs if folder_name.startswith("cycle")]
num_AL_cycles = len(cycle_subdirs)
return num_AL_cycles | 47.986842 | 143 | 0.732931 |
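# A minimal usage sketch for HitRatio; the paths are hypothetical placeholders,
# and `super_dir_to_cycle_dirs` is expected to contain "cycle_0", "cycle_1", ...
# sub-directories, each holding a "selected_indices.pickle" file.
from genedisco.evaluation.hitratio import HitRatio

def example_hit_ratio_at_latest_cycle() -> float:
    return HitRatio.evaluate(
        top_movers_filepath="outputs/top_movers.pickle",
        super_dir_to_cycle_dirs="outputs/experiment_0",
    )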
12fe26a4af0f0a8758ed418b3d06127b37fa4ad8 | 920 | py | Python | manager/projects/migrations/0016_auto_20201016_0326.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | [
"Apache-2.0"
] | 30 | 2016-03-26T12:08:04.000Z | 2021-12-24T14:48:32.000Z | manager/projects/migrations/0016_auto_20201016_0326.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | [
"Apache-2.0"
] | 1,250 | 2016-03-23T04:56:50.000Z | 2022-03-28T02:27:58.000Z | manager/projects/migrations/0016_auto_20201016_0326.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | [
"Apache-2.0"
] | 11 | 2016-07-14T17:04:20.000Z | 2021-07-01T16:19:09.000Z | # Generated by Django 3.1.2 on 2020-10-16 03:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0015_auto_20201007_0337'),
]
operations = [
migrations.RemoveField(
model_name='googledrivesource',
name='folder_id',
),
migrations.AddField(
model_name='googledrivesource',
name='google_id',
field=models.TextField(default='', help_text='The id of the file or folder.'),
preserve_default=False,
),
migrations.AddField(
model_name='googledrivesource',
name='kind',
field=models.CharField(choices=[('file', 'File'), ('folder', 'Folder')], default='folder', help_text='The kind of Google Drive resource: file or folder.', max_length=16),
preserve_default=False,
),
]
| 30.666667 | 182 | 0.594565 |
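# A sketch of the GoogleDriveSource fields implied by the migration above; the
# standalone model below is an assumption for illustration, not the project's
# actual class (which lives in the app's models module).
from django.db import models

class GoogleDriveSourceSketch(models.Model):
    google_id = models.TextField(help_text="The id of the file or folder.")
    kind = models.CharField(
        max_length=16,
        choices=[("file", "File"), ("folder", "Folder")],
        default="folder",
        help_text="The kind of Google Drive resource: file or folder.",
    )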
a201ac4aa8fba548a2db478ed74b26f9d6a8d17b | 12,945 | py | Python | mvpnet/train_3d.py | shnhrtkyk/mvpnet | cadf636749b5ee6e73e96ff68e4b32728088decd | [
"MIT"
] | 79 | 2020-01-12T20:30:34.000Z | 2022-03-15T06:37:09.000Z | mvpnet/train_3d.py | jtpils/mvpnet | cadf636749b5ee6e73e96ff68e4b32728088decd | [
"MIT"
] | 4 | 2020-02-14T17:26:56.000Z | 2021-08-30T07:54:47.000Z | mvpnet/train_3d.py | jtpils/mvpnet | cadf636749b5ee6e73e96ff68e4b32728088decd | [
"MIT"
] | 10 | 2020-01-13T05:59:15.000Z | 2021-11-02T03:00:22.000Z | #!/usr/bin/env python
import os
import os.path as osp
import sys
import argparse
import logging
import time
import socket
import warnings
import open3d # import before torch
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
# Assume that the script is run at the root directory
_ROOT_DIR = os.path.abspath(osp.dirname(__file__) + '/..')
sys.path.insert(0, _ROOT_DIR)
from common.solver.build import build_optimizer, build_scheduler
from common.nn.freezer import Freezer
from common.utils.checkpoint import CheckpointerV2
from common.utils.logger import setup_logger
from common.utils.metric_logger import MetricLogger
from common.utils.torch_util import set_random_seed
from common.utils.sampler import IterationBasedBatchSampler
from mvpnet.models.build import build_model_sem_seg_3d
from mvpnet.data.build import build_dataloader
def parse_args():
parser = argparse.ArgumentParser(description='PyTorch 3D Deep Learning Training')
parser.add_argument(
'--cfg',
dest='config_file',
default='',
metavar='FILE',
help='path to config file',
type=str,
)
parser.add_argument(
'opts',
help='Modify config options using the command-line',
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
return args
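# Example invocation (the config path is a placeholder; trailing KEY VALUE pairs
# are merged into the config through the `opts` remainder argument, e.g. the
# SCHEDULER.MAX_ITERATION and TRAIN.LOG_PERIOD keys used later in this script):
#
#   python mvpnet/train_3d.py --cfg configs/some_experiment.yaml \
#       SCHEDULER.MAX_ITERATION 60000 TRAIN.LOG_PERIOD 50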
def train(cfg, output_dir='', run_name=''):
# ---------------------------------------------------------------------------- #
# Build models, optimizer, scheduler, checkpointer, etc.
# It is recommended not to modify this section.
# ---------------------------------------------------------------------------- #
logger = logging.getLogger('mvpnet.train')
# build model
set_random_seed(cfg.RNG_SEED)
model, loss_fn, train_metric, val_metric = build_model_sem_seg_3d(cfg)
logger.info('Build model:\n{}'.format(str(model)))
num_params = sum(param.numel() for param in model.parameters())
print('#Parameters: {:.2e}'.format(num_params))
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
model = nn.DataParallel(model).cuda()
elif num_gpus == 1:
model = model.cuda()
else:
raise NotImplementedError('Not support cpu training now.')
# build optimizer
# model_cfg = cfg.MODEL[cfg.MODEL.TYPE]
optimizer = build_optimizer(cfg, model)
# build lr scheduler
scheduler = build_scheduler(cfg, optimizer)
# build checkpointer
# Note that checkpointer will load state_dict of model, optimizer and scheduler.
checkpointer = CheckpointerV2(model,
optimizer=optimizer,
scheduler=scheduler,
save_dir=output_dir,
logger=logger,
max_to_keep=cfg.TRAIN.MAX_TO_KEEP)
checkpoint_data = checkpointer.load(cfg.RESUME_PATH, resume=cfg.AUTO_RESUME, resume_states=cfg.RESUME_STATES)
ckpt_period = cfg.TRAIN.CHECKPOINT_PERIOD
# build freezer
if cfg.TRAIN.FROZEN_PATTERNS:
freezer = Freezer(model, cfg.TRAIN.FROZEN_PATTERNS)
freezer.freeze(verbose=True) # sanity check
else:
freezer = None
# build data loader
# Reset the random seed again in case the initialization of models changes the random state.
set_random_seed(cfg.RNG_SEED)
train_dataloader = build_dataloader(cfg, mode='train')
val_period = cfg.VAL.PERIOD
val_dataloader = build_dataloader(cfg, mode='val') if val_period > 0 else None
    # build tensorboard logger (optional; can be disabled by commenting out this block)
if output_dir:
tb_dir = osp.join(output_dir, 'tb.{:s}'.format(run_name))
summary_writier = SummaryWriter(tb_dir)
else:
summary_writier = None
# ---------------------------------------------------------------------------- #
# Train
# Customization begins here.
# ---------------------------------------------------------------------------- #
max_iteration = cfg.SCHEDULER.MAX_ITERATION
start_iteration = checkpoint_data.get('iteration', 0)
best_metric_name = 'best_{}'.format(cfg.VAL.METRIC)
best_metric = checkpoint_data.get(best_metric_name, None)
logger.info('Start training from iteration {}'.format(start_iteration))
# add metrics
if not isinstance(train_metric, (list, tuple)):
train_metric = [train_metric]
if not isinstance(val_metric, (list, tuple)):
val_metric = [val_metric]
train_metric_logger = MetricLogger(delimiter=' ')
train_metric_logger.add_meters(train_metric)
val_metric_logger = MetricLogger(delimiter=' ')
val_metric_logger.add_meters(val_metric)
# wrap the dataloader
batch_sampler = train_dataloader.batch_sampler
train_dataloader.batch_sampler = IterationBasedBatchSampler(batch_sampler, max_iteration, start_iteration)
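    # A minimal sketch of the idea behind IterationBasedBatchSampler (illustrative
    # only; the actual class in common.utils.sampler may differ, e.g. in how it
    # resumes mid-epoch):
    #
    #   class _SketchIterationBasedBatchSampler:
    #       def __init__(self, batch_sampler, num_iterations, start_iter=0):
    #           self.batch_sampler = batch_sampler
    #           self.num_iterations = num_iterations
    #           self.start_iter = start_iter
    #       def __iter__(self):
    #           iteration = self.start_iter
    #           while iteration < self.num_iterations:
    #               for batch in self.batch_sampler:  # re-iterate epochs as needed
    #                   yield batch
    #                   iteration += 1
    #                   if iteration >= self.num_iterations:
    #                       break
    #       def __len__(self):
    #           return self.num_iterations - self.start_iter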
def setup_train():
# set training mode
model.train()
loss_fn.train()
# freeze parameters/modules optionally
if freezer is not None:
freezer.freeze()
# reset metric
train_metric_logger.reset()
def setup_validate():
# set evaluate mode
model.eval()
loss_fn.eval()
# reset metric
val_metric_logger.reset()
setup_train()
end = time.time()
for iteration, data_batch in enumerate(train_dataloader, start_iteration):
data_time = time.time() - end
# copy data from cpu to gpu
data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items()}
# forward
preds = model(data_batch)
# update losses
optimizer.zero_grad()
loss_dict = loss_fn(preds, data_batch)
total_loss = sum(loss_dict.values())
# It is slightly faster to update metrics and meters before backward
with torch.no_grad():
train_metric_logger.update(loss=total_loss, **loss_dict)
for metric in train_metric:
metric.update_dict(preds, data_batch)
# backward
total_loss.backward()
if cfg.OPTIMIZER.MAX_GRAD_NORM > 0:
# CAUTION: built-in clip_grad_norm_ clips the total norm.
nn.utils.clip_grad_norm_(model.parameters(), max_norm=cfg.OPTIMIZER.MAX_GRAD_NORM)
optimizer.step()
batch_time = time.time() - end
train_metric_logger.update(time=batch_time, data=data_time)
cur_iter = iteration + 1
# log
if cur_iter == 1 or (cfg.TRAIN.LOG_PERIOD > 0 and cur_iter % cfg.TRAIN.LOG_PERIOD) == 0:
logger.info(
train_metric_logger.delimiter.join(
[
'iter: {iter:4d}',
'{meters}',
'lr: {lr:.2e}',
'max mem: {memory:.0f}',
]
).format(
iter=cur_iter,
meters=str(train_metric_logger),
lr=optimizer.param_groups[0]['lr'],
memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2),
)
)
# summary
if summary_writier is not None and cfg.TRAIN.SUMMARY_PERIOD > 0 and cur_iter % cfg.TRAIN.SUMMARY_PERIOD == 0:
keywords = ('loss', 'acc', 'iou')
for name, meter in train_metric_logger.meters.items():
if all(k not in name for k in keywords):
continue
summary_writier.add_scalar('train/' + name, meter.global_avg, global_step=cur_iter)
# checkpoint
if (ckpt_period > 0 and cur_iter % ckpt_period == 0) or cur_iter == max_iteration:
checkpoint_data['iteration'] = cur_iter
checkpoint_data[best_metric_name] = best_metric
checkpointer.save('model_{:06d}'.format(cur_iter), **checkpoint_data)
# ---------------------------------------------------------------------------- #
# validate for one epoch
# ---------------------------------------------------------------------------- #
if val_period > 0 and (cur_iter % val_period == 0 or cur_iter == max_iteration):
start_time_val = time.time()
setup_validate()
end = time.time()
with torch.no_grad():
for iteration_val, data_batch in enumerate(val_dataloader):
data_time = time.time() - end
# copy data from cpu to gpu
data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items()}
# forward
preds = model(data_batch)
# update losses and metrics
loss_dict = loss_fn(preds, data_batch)
total_loss = sum(loss_dict.values())
# update metrics and meters
val_metric_logger.update(loss=total_loss, **loss_dict)
for metric in val_metric:
metric.update_dict(preds, data_batch)
batch_time = time.time() - end
val_metric_logger.update(time=batch_time, data=data_time)
end = time.time()
if cfg.VAL.LOG_PERIOD > 0 and iteration_val % cfg.VAL.LOG_PERIOD == 0:
logger.info(
val_metric_logger.delimiter.join(
[
'iter: {iter:4d}',
'{meters}',
'max mem: {memory:.0f}',
]
).format(
                                iter=iteration_val,  # log the inner validation iteration
meters=str(val_metric_logger),
memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2),
)
)
epoch_time_val = time.time() - start_time_val
logger.info('Iteration[{}]-Val {} total_time: {:.2f}s'.format(
cur_iter, val_metric_logger.summary_str, epoch_time_val))
# summary
if summary_writier is not None:
keywords = ('loss', 'acc', 'iou')
for name, meter in val_metric_logger.meters.items():
if all(k not in name for k in keywords):
continue
summary_writier.add_scalar('val/' + name, meter.global_avg, global_step=cur_iter)
# best validation
if cfg.VAL.METRIC in val_metric_logger.meters:
cur_metric = val_metric_logger.meters[cfg.VAL.METRIC].global_avg
if best_metric is None \
or ('loss' not in cfg.VAL.METRIC and cur_metric > best_metric) \
or ('loss' in cfg.VAL.METRIC and cur_metric < best_metric):
best_metric = cur_metric
checkpoint_data['iteration'] = cur_iter
checkpoint_data[best_metric_name] = best_metric
checkpointer.save('model_best', tag=False, **checkpoint_data)
# restore training
setup_train()
# since pytorch v1.1.0, lr_scheduler is called after optimization.
if scheduler is not None:
scheduler.step()
end = time.time()
logger.info('Best val-{} = {}'.format(cfg.VAL.METRIC, best_metric))
return model
def main():
args = parse_args()
# load the configuration
# import on-the-fly to avoid overwriting cfg
from common.config import purge_cfg
from mvpnet.config.sem_seg_3d import cfg
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
purge_cfg(cfg)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
# replace '@' with config path
if output_dir:
config_path = osp.splitext(args.config_file)[0]
output_dir = output_dir.replace('@', config_path.replace('configs', 'outputs'))
if osp.isdir(output_dir):
warnings.warn('Output directory exists.')
os.makedirs(output_dir, exist_ok=True)
# run name
timestamp = time.strftime('%m-%d_%H-%M-%S')
hostname = socket.gethostname()
run_name = '{:s}.{:s}'.format(timestamp, hostname)
logger = setup_logger('mvpnet', output_dir, comment='train.{:s}'.format(run_name))
logger.info('{:d} GPUs available'.format(torch.cuda.device_count()))
logger.info(args)
from common.utils.misc import collect_env_info
logger.info('Collecting env info (might take some time)\n' + collect_env_info())
logger.info('Loaded configuration file {:s}'.format(args.config_file))
logger.info('Running with config:\n{}'.format(cfg))
assert cfg.TASK == 'sem_seg_3d'
train(cfg, output_dir, run_name)
if __name__ == '__main__':
main()
| 38.641791 | 117 | 0.579452 |
1b9c8afc1c1891eb64e0bd29e4e9910221cffe1d | 41,440 | py | Python | src/v5.1/resources/swagger_client/api/learning_standard_equivalence_associations_api.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 2 | 2021-04-27T17:18:17.000Z | 2021-04-27T19:14:39.000Z | src/v5.1/resources/swagger_client/api/learning_standard_equivalence_associations_api.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | null | null | null | src/v5.1/resources/swagger_client/api/learning_standard_equivalence_associations_api.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 1 | 2022-01-06T09:43:11.000Z | 2022-01-06T09:43:11.000Z | # coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class LearningStandardEquivalenceAssociationsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_learning_standard_equivalence_association_by_id(self, id, **kwargs): # noqa: E501
"""Deletes an existing resource using the resource identifier. # noqa: E501
The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_learning_standard_equivalence_association_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_learning_standard_equivalence_association_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_learning_standard_equivalence_association_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_learning_standard_equivalence_association_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Deletes an existing resource using the resource identifier. # noqa: E501
The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_learning_standard_equivalence_association_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'if_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_learning_standard_equivalence_association_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `delete_learning_standard_equivalence_association_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/learningStandardEquivalenceAssociations/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deletes_learning_standard_equivalence_associations(self, **kwargs): # noqa: E501
"""Retrieves deleted resources based on change version. # noqa: E501
The DELETES operation is used to retrieve deleted resources. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_learning_standard_equivalence_associations(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:return: list[EdFiLearningStandardEquivalenceAssociation]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deletes_learning_standard_equivalence_associations_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.deletes_learning_standard_equivalence_associations_with_http_info(**kwargs) # noqa: E501
return data
def deletes_learning_standard_equivalence_associations_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves deleted resources based on change version. # noqa: E501
The DELETES operation is used to retrieve deleted resources. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_learning_standard_equivalence_associations_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:return: list[EdFiLearningStandardEquivalenceAssociation]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deletes_learning_standard_equivalence_associations" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `deletes_learning_standard_equivalence_associations`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `deletes_learning_standard_equivalence_associations`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version'])) # noqa: E501
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/learningStandardEquivalenceAssociations/deletes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EdFiLearningStandardEquivalenceAssociation]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_learning_standard_equivalence_associations(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_learning_standard_equivalence_associations(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response. If set to false, 'Total-Count' header will not be provided.
:param str namespace: The namespace of the organization that has created and owns the association.
:param str source_learning_standard_id: The identifier for the specific learning standard (e.g., 111.15.3.1.A).
:param str target_learning_standard_id: The identifier for the specific learning standard (e.g., 111.15.3.1.A).
:param str learning_standard_equivalence_strength_descriptor: A measure that indicates the strength or quality of the equivalence relationship.
:param date effective_date: The date that the association is considered to be applicable or effective.
:param str id:
:param str learning_standard_equivalence_strength_description: Captures supplemental information on the relationship. Recommended for use only when the match is partial.
:return: list[EdFiLearningStandardEquivalenceAssociation]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_learning_standard_equivalence_associations_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_learning_standard_equivalence_associations_with_http_info(**kwargs) # noqa: E501
return data
def get_learning_standard_equivalence_associations_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_learning_standard_equivalence_associations_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response. If set to false, 'Total-Count' header will not be provided.
:param str namespace: The namespace of the organization that has created and owns the association.
:param str source_learning_standard_id: The identifier for the specific learning standard (e.g., 111.15.3.1.A).
:param str target_learning_standard_id: The identifier for the specific learning standard (e.g., 111.15.3.1.A).
:param str learning_standard_equivalence_strength_descriptor: A measure that indicates the strength or quality of the equivalence relationship.
:param date effective_date: The date that the association is considered to be applicable or effective.
:param str id:
:param str learning_standard_equivalence_strength_description: Captures supplemental information on the relationship. Recommended for use only when the match is partial.
:return: list[EdFiLearningStandardEquivalenceAssociation]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version', 'total_count', 'namespace', 'source_learning_standard_id', 'target_learning_standard_id', 'learning_standard_equivalence_strength_descriptor', 'effective_date', 'id', 'learning_standard_equivalence_strength_description'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_learning_standard_equivalence_associations" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_learning_standard_equivalence_associations`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_learning_standard_equivalence_associations`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and ('namespace' in params and
len(params['namespace']) > 255):
raise ValueError("Invalid value for parameter `namespace` when calling `get_learning_standard_equivalence_associations`, length must be less than or equal to `255`") # noqa: E501
if self.api_client.client_side_validation and ('source_learning_standard_id' in params and
len(params['source_learning_standard_id']) > 60):
raise ValueError("Invalid value for parameter `source_learning_standard_id` when calling `get_learning_standard_equivalence_associations`, length must be less than or equal to `60`") # noqa: E501
if self.api_client.client_side_validation and ('target_learning_standard_id' in params and
len(params['target_learning_standard_id']) > 60):
raise ValueError("Invalid value for parameter `target_learning_standard_id` when calling `get_learning_standard_equivalence_associations`, length must be less than or equal to `60`") # noqa: E501
if self.api_client.client_side_validation and ('learning_standard_equivalence_strength_descriptor' in params and
len(params['learning_standard_equivalence_strength_descriptor']) > 306):
raise ValueError("Invalid value for parameter `learning_standard_equivalence_strength_descriptor` when calling `get_learning_standard_equivalence_associations`, length must be less than or equal to `306`") # noqa: E501
if self.api_client.client_side_validation and ('learning_standard_equivalence_strength_description' in params and
len(params['learning_standard_equivalence_strength_description']) > 255):
raise ValueError("Invalid value for parameter `learning_standard_equivalence_strength_description` when calling `get_learning_standard_equivalence_associations`, length must be less than or equal to `255`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version'])) # noqa: E501
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version'])) # noqa: E501
if 'total_count' in params:
query_params.append(('totalCount', params['total_count'])) # noqa: E501
if 'namespace' in params:
query_params.append(('namespace', params['namespace'])) # noqa: E501
if 'source_learning_standard_id' in params:
query_params.append(('sourceLearningStandardId', params['source_learning_standard_id'])) # noqa: E501
if 'target_learning_standard_id' in params:
query_params.append(('targetLearningStandardId', params['target_learning_standard_id'])) # noqa: E501
if 'learning_standard_equivalence_strength_descriptor' in params:
query_params.append(('learningStandardEquivalenceStrengthDescriptor', params['learning_standard_equivalence_strength_descriptor'])) # noqa: E501
if 'effective_date' in params:
query_params.append(('effectiveDate', params['effective_date'])) # noqa: E501
if 'id' in params:
query_params.append(('id', params['id'])) # noqa: E501
if 'learning_standard_equivalence_strength_description' in params:
query_params.append(('learningStandardEquivalenceStrengthDescription', params['learning_standard_equivalence_strength_description'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/learningStandardEquivalenceAssociations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EdFiLearningStandardEquivalenceAssociation]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_learning_standard_equivalence_associations_by_id(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_learning_standard_equivalence_associations_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:return: EdFiLearningStandardEquivalenceAssociation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_learning_standard_equivalence_associations_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_learning_standard_equivalence_associations_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_learning_standard_equivalence_associations_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_learning_standard_equivalence_associations_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:return: EdFiLearningStandardEquivalenceAssociation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'if_none_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_learning_standard_equivalence_associations_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `get_learning_standard_equivalence_associations_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_none_match' in params:
header_params['If-None-Match'] = params['if_none_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/learningStandardEquivalenceAssociations/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EdFiLearningStandardEquivalenceAssociation', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_learning_standard_equivalence_association(self, learning_standard_equivalence_association, **kwargs): # noqa: E501
"""Creates or updates resources based on the natural key values of the supplied resource. # noqa: E501
The POST operation can be used to create or update resources. In database terms, this is often referred to as an \"upsert\" operation (insert + update). Clients should NOT include the resource \"id\" in the JSON body because it will result in an error (you must use a PUT operation to update a resource by \"id\"). The web service will identify whether the resource already exists based on the natural key values provided, and update or create the resource appropriately. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_learning_standard_equivalence_association(learning_standard_equivalence_association, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EdFiLearningStandardEquivalenceAssociation learning_standard_equivalence_association: The JSON representation of the \"learningStandardEquivalenceAssociation\" resource to be created or updated. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_learning_standard_equivalence_association_with_http_info(learning_standard_equivalence_association, **kwargs) # noqa: E501
else:
(data) = self.post_learning_standard_equivalence_association_with_http_info(learning_standard_equivalence_association, **kwargs) # noqa: E501
return data
def post_learning_standard_equivalence_association_with_http_info(self, learning_standard_equivalence_association, **kwargs): # noqa: E501
"""Creates or updates resources based on the natural key values of the supplied resource. # noqa: E501
The POST operation can be used to create or update resources. In database terms, this is often referred to as an \"upsert\" operation (insert + update). Clients should NOT include the resource \"id\" in the JSON body because it will result in an error (you must use a PUT operation to update a resource by \"id\"). The web service will identify whether the resource already exists based on the natural key values provided, and update or create the resource appropriately. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_learning_standard_equivalence_association_with_http_info(learning_standard_equivalence_association, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EdFiLearningStandardEquivalenceAssociation learning_standard_equivalence_association: The JSON representation of the \"learningStandardEquivalenceAssociation\" resource to be created or updated. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['learning_standard_equivalence_association'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_learning_standard_equivalence_association" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'learning_standard_equivalence_association' is set
if self.api_client.client_side_validation and ('learning_standard_equivalence_association' not in params or
params['learning_standard_equivalence_association'] is None): # noqa: E501
raise ValueError("Missing the required parameter `learning_standard_equivalence_association` when calling `post_learning_standard_equivalence_association`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'learning_standard_equivalence_association' in params:
body_params = params['learning_standard_equivalence_association']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/learningStandardEquivalenceAssociations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def put_learning_standard_equivalence_association(self, id, learning_standard_equivalence_association, **kwargs): # noqa: E501
"""Updates or creates a resource based on the resource identifier. # noqa: E501
The PUT operation is used to update or create a resource by identifier. If the resource doesn't exist, the resource will be created using that identifier. Additionally, natural key values cannot be changed using this operation, and will not be modified in the database. If the resource \"id\" is provided in the JSON body, it will be ignored as well. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_learning_standard_equivalence_association(id, learning_standard_equivalence_association, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param EdFiLearningStandardEquivalenceAssociation learning_standard_equivalence_association: The JSON representation of the \"learningStandardEquivalenceAssociation\" resource to be created or updated. (required)
:param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.put_learning_standard_equivalence_association_with_http_info(id, learning_standard_equivalence_association, **kwargs) # noqa: E501
else:
(data) = self.put_learning_standard_equivalence_association_with_http_info(id, learning_standard_equivalence_association, **kwargs) # noqa: E501
return data
def put_learning_standard_equivalence_association_with_http_info(self, id, learning_standard_equivalence_association, **kwargs): # noqa: E501
"""Updates or creates a resource based on the resource identifier. # noqa: E501
The PUT operation is used to update or create a resource by identifier. If the resource doesn't exist, the resource will be created using that identifier. Additionally, natural key values cannot be changed using this operation, and will not be modified in the database. If the resource \"id\" is provided in the JSON body, it will be ignored as well. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_learning_standard_equivalence_association_with_http_info(id, learning_standard_equivalence_association, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param EdFiLearningStandardEquivalenceAssociation learning_standard_equivalence_association: The JSON representation of the \"learningStandardEquivalenceAssociation\" resource to be created or updated. (required)
:param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'learning_standard_equivalence_association', 'if_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_learning_standard_equivalence_association" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `put_learning_standard_equivalence_association`") # noqa: E501
# verify the required parameter 'learning_standard_equivalence_association' is set
if self.api_client.client_side_validation and ('learning_standard_equivalence_association' not in params or
params['learning_standard_equivalence_association'] is None): # noqa: E501
raise ValueError("Missing the required parameter `learning_standard_equivalence_association` when calling `put_learning_standard_equivalence_association`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'learning_standard_equivalence_association' in params:
body_params = params['learning_standard_equivalence_association']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/learningStandardEquivalenceAssociations/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 57.555556 | 493 | 0.680405 |
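# A minimal usage sketch for the generated client above; the host URL and token
# below are placeholders that depend on the target Ed-Fi ODS deployment.
import swagger_client
from swagger_client.rest import ApiException

def example_list_equivalence_associations():
    configuration = swagger_client.Configuration()
    configuration.host = "https://example.org/v5.1/api/data/v3"
    configuration.access_token = "YOUR_OAUTH2_TOKEN"
    api = swagger_client.LearningStandardEquivalenceAssociationsApi(
        swagger_client.ApiClient(configuration))
    try:
        return api.get_learning_standard_equivalence_associations(limit=25)
    except ApiException as exc:
        print("Ed-Fi API call failed:", exc)
        return []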
7ac505b3efb2ee5e29189a3312c2a89ae79a9876 | 128 | py | Python | docker/app/conf/application.py | sled30/python-sample-app-test | 35a8a1669023dfde7dfc14e6f6cba6926fb1d610 | [
"MIT"
] | null | null | null | docker/app/conf/application.py | sled30/python-sample-app-test | 35a8a1669023dfde7dfc14e6f6cba6926fb1d610 | [
"MIT"
] | null | null | null | docker/app/conf/application.py | sled30/python-sample-app-test | 35a8a1669023dfde7dfc14e6f6cba6926fb1d610 | [
"MIT"
] | null | null | null | from api import api as application
if __name__ == "__main__":
application.run(host='0.0.0.0')
# api.run(host='0.0.0.0')
| 21.333333 | 35 | 0.648438 |
8c9d5afe40d4c85f1596803c4f4e1fd94937bfcc | 1,528 | py | Python | sfi_www/urls.py | sfikrakow/www | ec4e1451849863749d2dc977b8a91c7767e75a1a | [
"MIT"
] | 5 | 2020-04-27T22:51:14.000Z | 2020-12-03T13:08:49.000Z | sfi_www/urls.py | sfikrakow/www | ec4e1451849863749d2dc977b8a91c7767e75a1a | [
"MIT"
] | 1 | 2021-04-02T22:31:11.000Z | 2021-04-02T22:31:12.000Z | sfi_www/urls.py | sfikrakow/www | ec4e1451849863749d2dc977b8a91c7767e75a1a | [
"MIT"
] | 2 | 2020-04-28T07:08:25.000Z | 2021-04-16T09:49:08.000Z | from django.conf import settings
from django.conf.urls import include
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.urls import path
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.contrib.sitemaps.views import sitemap
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from agenda.views import EditionPodcastFeedView
from common.views import sitemap_index
from forms.views import ContactFormView
urlpatterns = [
path('oidc/', include('mozilla_django_oidc.urls')),
path('django-admin/', admin.site.urls),
path('admin/', include(wagtailadmin_urls)),
path('documents/', include(wagtaildocs_urls)),
path('contact_form/', ContactFormView.as_view()),
path('feeds/podcasts/<slug:slug>/feed.rss', EditionPodcastFeedView(), name='feeds_podcast'),
path('sitemap.xml', sitemap_index)
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns = urlpatterns + i18n_patterns(
path('sitemap.xml', sitemap),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's page serving mechanism. This should be the last pattern in
# the list:
path("", include(wagtail_urls)),
)
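# Example of resolving the podcast feed route defined above (the slug value is a
# placeholder):
#
#   from django.urls import reverse
#   reverse("feeds_podcast", kwargs={"slug": "sfi-17"})
#   # -> "/feeds/podcasts/sfi-17/feed.rss"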
| 37.268293 | 96 | 0.768325 |
c996895a7b918da169eb2f22d7284c727253bfc5 | 3,626 | py | Python | safe/settings.py | MaryMbugua/Safe | 2aaa4760cfa96aafc4d37233fe7b4df584e2ed79 | [
"MIT"
] | null | null | null | safe/settings.py | MaryMbugua/Safe | 2aaa4760cfa96aafc4d37233fe7b4df584e2ed79 | [
"MIT"
] | null | null | null | safe/settings.py | MaryMbugua/Safe | 2aaa4760cfa96aafc4d37233fe7b4df584e2ed79 | [
"MIT"
] | null | null | null | """
Django settings for safe project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG',default=False,cast=bool)
ALLOWED_HOSTS = ['*']
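# SECRET_KEY and DEBUG above are read from the environment (or a .env file) by
# python-decouple; a hypothetical .env for local development might look like:
#
#   SECRET_KEY=change-me
#   DEBUG=True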
# Application definition
INSTALLED_APPS = [
'hood',
'bootstrap3',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'safe.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'safe.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'hoodwatch',
'USER': 'nish',
'PASSWORD': 'Nish',
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | 26.275362 | 91 | 0.699945 |
e7e96d5bcbaa7b76e47b48a67006f7044bcad3c8 | 17,357 | py | Python | cvpods/modeling/meta_arch/detr.py | reinforcementdriving/cvpods | 32d98b74745020be035a0e20337ad934201615c4 | [
"Apache-2.0"
] | 1 | 2021-04-24T17:01:29.000Z | 2021-04-24T17:01:29.000Z | cvpods/modeling/meta_arch/detr.py | wondervictor/cvpods | 614a975e5425bbaeb66bbd1ffca552d633ba89ca | [
"Apache-2.0"
] | null | null | null | cvpods/modeling/meta_arch/detr.py | wondervictor/cvpods | 614a975e5425bbaeb66bbd1ffca552d633ba89ca | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) BaseDetection, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn
from cvpods.layers import ShapeSpec, position_encoding_dict
from cvpods.modeling.backbone import Transformer
from cvpods.modeling.matcher import HungarianMatcher
from cvpods.structures import Boxes, ImageList, Instances
from cvpods.structures import boxes as box_ops
from cvpods.structures.boxes import generalized_box_iou
from cvpods.utils import comm
from cvpods.utils.metrics import accuracy
class DETR(nn.Module):
def __init__(self, cfg):
super(DETR, self).__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
# Build Backbone
self.backbone = cfg.build_backbone(
cfg, input_shape=ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
)
# Build Transformer
self.transformer = Transformer(cfg)
self.aux_loss = not cfg.MODEL.DETR.NO_AUX_LOSS
self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
self.num_queries = cfg.MODEL.DETR.NUM_QUERIES
hidden_dim = self.transformer.d_model
# Build FFN
self.class_embed = nn.Linear(hidden_dim, self.num_classes + 1)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
# Build Object Queries
self.query_embed = nn.Embedding(self.num_queries, hidden_dim)
backbone_out_shapes = self.backbone.output_shape()["res5"]
self.input_proj = nn.Conv2d(backbone_out_shapes.channels, hidden_dim, kernel_size=1)
self.position_embedding = position_encoding_dict[cfg.MODEL.DETR.POSITION_EMBEDDING](
num_pos_feats=hidden_dim // 2,
temperature=cfg.MODEL.DETR.TEMPERATURE,
normalize=True if cfg.MODEL.DETR.POSITION_EMBEDDING == "sine" else False,
scale=None,
)
self.weight_dict = {
"loss_ce": cfg.MODEL.DETR.CLASS_LOSS_COEFF,
"loss_bbox": cfg.MODEL.DETR.BBOX_LOSS_COEFF,
"loss_giou": cfg.MODEL.DETR.GIOU_LOSS_COEFF,
}
if self.aux_loss:
self.aux_weight_dict = {}
for i in range(cfg.MODEL.DETR.TRANSFORMER.NUM_DEC_LAYERS - 1):
self.aux_weight_dict.update({k + f"_{i}": v for k, v in self.weight_dict.items()})
self.weight_dict.update(self.aux_weight_dict)
losses = ["labels", "boxes", "cardinality"]
matcher = HungarianMatcher(
cost_class=cfg.MODEL.DETR.COST_CLASS,
cost_bbox=cfg.MODEL.DETR.COST_BBOX,
cost_giou=cfg.MODEL.DETR.COST_GIOU,
)
self.criterion = SetCriterion(
self.num_classes,
matcher=matcher,
weight_dict=self.weight_dict,
eos_coef=cfg.MODEL.DETR.EOS_COEFF,
losses=losses,
)
self.post_processors = {"bbox": PostProcess()}
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
if not cfg.MODEL.RESNETS.STRIDE_IN_1X1:
# Custom or torch pretrain weights
self.normalizer = lambda x: (x / 255.0 - pixel_mean) / pixel_std
else:
# MSRA pretrain weights
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
dict[str: Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
images = self.preprocess_image(batched_inputs)
B, C, H, W = images.tensor.shape
device = images.tensor.device
mask = torch.ones((B, H, W), dtype=torch.bool, device=device)
for img_shape, m in zip(images.image_sizes, mask):
m[: img_shape[0], : img_shape[1]] = False
src = self.backbone(images.tensor)["res5"]
mask = F.interpolate(mask[None].float(), size=src.shape[-2:]).bool()[0]
pos = self.position_embedding(src, mask)
hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos)[0]
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
if self.training:
targets = self.convert_anno_format(batched_inputs)
if self.aux_loss:
out["aux_outputs"] = [
{"pred_logits": a, "pred_boxes": b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
]
loss_dict = self.criterion(out, targets)
for k, v in loss_dict.items():
loss_dict[k] = v * self.weight_dict[k] if k in self.weight_dict else v
return loss_dict
else:
target_sizes = torch.stack(
[
torch.tensor([
bi.get("height", img_size[0]),
bi.get("width", img_size[1])],
device=self.device)
for bi, img_size in zip(batched_inputs, images.image_sizes)
]
)
res = self.post_processors["bbox"](out, target_sizes)
processed_results = []
# for results_per_image, input_per_image, image_size in zip(
for results_per_image, _, image_size in zip(res, batched_inputs, images.image_sizes):
result = Instances(image_size)
result.pred_boxes = Boxes(results_per_image["boxes"].float())
result.scores = results_per_image["scores"].float()
result.pred_classes = results_per_image["labels"]
processed_results.append({"instances": result})
return processed_results
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].float().to(self.device) for x in batched_inputs]
images = [self.normalizer(img) for img in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
def convert_anno_format(self, batched_inputs):
targets = []
for bi in batched_inputs:
target = {}
h, w = bi["image"].shape[-2:]
boxes = box_ops.box_xyxy_to_cxcywh(
bi["instances"].gt_boxes.tensor / torch.tensor([w, h, w, h], dtype=torch.float32)
)
target["boxes"] = boxes.to(self.device)
target["area"] = bi["instances"].gt_boxes.area().to(self.device)
target["labels"] = bi["instances"].gt_classes.to(self.device)
if hasattr(bi["instances"], "gt_masks"):
target["masks"] = bi["instances"].gt_masks
target["iscrowd"] = torch.zeros_like(target["labels"], device=self.device)
target["orig_size"] = torch.tensor([bi["height"], bi["width"]], device=self.device)
target["size"] = torch.tensor([h, w], device=self.device)
target["image_id"] = torch.tensor(bi["image_id"], device=self.device)
targets.append(target)
return targets
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their
relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
del num_boxes
src_logits = outputs["pred_logits"]
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses["class_error"] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
"""
Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty
boxes. This is not really a loss, it is intended for logging purposes only. It doesn't
propagate gradients
"""
del indices
del num_boxes
pred_logits = outputs["pred_logits"]
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {"cardinality_error": card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""
Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the
image size.
"""
assert "pred_boxes" in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs["pred_boxes"][idx]
target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
losses = {}
losses["loss_bbox"] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(
generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes)
)
)
losses["loss_giou"] = loss_giou.sum() / num_boxes
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
"labels": self.loss_labels,
"cardinality": self.loss_cardinality,
"boxes": self.loss_boxes,
}
assert loss in loss_map, f"do you really want to compute {loss} loss?"
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
"""
This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depend on the losses applied, see each
loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
# Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor(
[num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device
)
if comm.get_world_size() > 1:
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / comm.get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of
# each intermediate layer.
if "aux_outputs" in outputs:
for i, aux_outputs in enumerate(outputs["aux_outputs"]):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == "masks":
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == "labels":
# Logging is enabled only for the last layer
kwargs = {"log": False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
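# Illustrative usage of SetCriterion (a sketch, not part of the original module; the
# shapes, cost coefficients and loss weights below are assumptions):
# matcher = HungarianMatcher(cost_class=1.0, cost_bbox=5.0, cost_giou=2.0)
# criterion = SetCriterion(num_classes=80, matcher=matcher,
#                          weight_dict={"loss_ce": 1.0, "loss_bbox": 5.0, "loss_giou": 2.0},
#                          eos_coef=0.1, losses=["labels", "boxes", "cardinality"])
# outputs = {"pred_logits": torch.randn(2, 100, 81),  # (batch, num_queries, num_classes + 1)
#            "pred_boxes": torch.rand(2, 100, 4)}     # normalized (cx, cy, w, h)
# targets = [{"labels": torch.tensor([3]), "boxes": torch.rand(1, 4)} for _ in range(2)]
# loss_dict = criterion(outputs, targets)  # step 1: Hungarian matching, step 2: per-pair losses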
class PostProcess(nn.Module):
""" This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
"""
Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images
of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augmentation,
but before padding
"""
out_logits, out_bbox = outputs["pred_logits"], outputs["pred_boxes"]
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = F.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
# convert to [x0, y0, x1, y1] format
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
return results
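# Illustrative call (a sketch; the single 480x640 target size is an assumption):
# post = PostProcess()
# res = post(out, torch.tensor([[480, 640]]))  # one (height, width) row per image in the batch
# res[0]["boxes"]  # absolute xyxy boxes for the first image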
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
| 41.723558 | 100 | 0.612779 |
8f0c08070cdc926d8829459beaa4ca7be716f1a5 | 1,322 | py | Python | Tools/remove_car_from_sun2012.py | n8886919/YOLO | 3726a8819d7880e03f4a7e056751ad10a850201b | [
"BSD-Source-Code"
] | 52 | 2019-03-27T05:18:28.000Z | 2022-03-22T04:30:17.000Z | Tools/remove_car_from_sun2012.py | n8886919/YOLO | 3726a8819d7880e03f4a7e056751ad10a850201b | [
"BSD-Source-Code"
] | 6 | 2019-08-27T07:48:51.000Z | 2022-01-13T01:08:26.000Z | Tools/remove_car_from_sun2012.py | n8886919/YOLO | 3726a8819d7880e03f4a7e056751ad10a850201b | [
"BSD-Source-Code"
] | 17 | 2019-03-27T15:12:54.000Z | 2022-03-18T21:30:14.000Z | def remove_car_from_sun2012():
import os
from shutil import copyfile
import xml.etree.cElementTree as ET
bg_root = '/media/nolan/HDD1/sun2012pascalformat'
sun_img_path = os.path.join(bg_root, 'JPEGImages')
sun_anno_path = os.path.join(bg_root, 'Annotations')
counter = 0
for img in os.listdir(sun_img_path):
detected = False
img_name = (img.split('.')[0]).split('/')[-1]
img_xml_path = os.path.join(sun_anno_path, (img_name+'.xml'))
try:
img_xml = ET.ElementTree(file=img_xml_path)
root = img_xml.getroot()
for child in root:
if child.tag == 'object':
for sub_child in child:
if sub_child.tag == 'name':
text = sub_child.text
if ('car' in text or 'van' in text or 'truck' in text):
detected = True
break
if detected:
break
except Exception as e:
pass
if not detected:
counter += 1
src = os.path.join(sun_img_path, img)
dst = os.path.join('/media/nolan/9fc64877-3935-46df-9ad0-c601733f5888/sun2012', img)
copyfile(src, dst)
print(counter)
| 37.771429 | 96 | 0.52118 |
d8cfc5273e137c985b9a50691494c4aea99e27f0 | 2,363 | py | Python | template/iv.py | houzeyu2683/PythonCrawlerTemplate | 701d371789fc81eb8ed052e9e8dd0e83ed847580 | [
"MIT"
] | null | null | null | template/iv.py | houzeyu2683/PythonCrawlerTemplate | 701d371789fc81eb8ed052e9e8dd0e83ed847580 | [
"MIT"
] | null | null | null | template/iv.py | houzeyu2683/PythonCrawlerTemplate | 701d371789fc81eb8ed052e9e8dd0e83ed847580 | [
"MIT"
] | null | null | null |
## The packages.
from selenium import webdriver
import pandas, os, time, tqdm
import re
import time
## The goal.
'''
Search posts on ptt's stock board, sorted from newest to oldest, scrape the matched posts, and export them as a table.
'''
##
## The arguments.
platform = 'dcard'
board = 'mood'
site = "https://www.dcard.tw/f/mood"
number = 20
folder = "LOG/IV"
confirmation = False
##
## Initial process.
os.makedirs(folder) if not os.path.isdir(folder) else None
option = webdriver.chrome.options.Options()
option.binary_location = "/usr/bin/google-chrome"
driver = webdriver.Chrome(options=option, executable_path='driver/chrome')
driver.set_window_size(1920/5, 1080/2)
driver.get(site)
driver.find_element_by_css_selector(".btn-big").click() if confirmation else None
document = {
"platform":platform,
"board":board,
"title":[],
"link":[],
"author":[],
"date":[],
"content":[],
"comment":[]
}
## Relax a second.
time.sleep(1)
##
## Get title and link.
for n in range(1, number+1):
document['title'] += [re.sub("#", "", i.text) for i in driver.find_elements_by_css_selector('.cUGTXH')]
document['link'] += [i.get_attribute('href') for i in driver.find_elements_by_xpath('//h2[@class="tgn9uw-2 jWUdzO"]/a')]
driver.execute_script("var q=document.documentElement.scrollTop={}".format(n * 10000))
time.sleep(1)
pass
##
## Get other information based on link.
for l in tqdm.tqdm(document['link']):
driver.get(l)
time.sleep(5)
try:
document['date'] += [driver.find_element_by_css_selector(".boQZzA+ .boQZzA").text]
document['author'] += [driver.find_element_by_xpath("//div[@class='s3d701-2 kBmYXB']").text]
document['content'] += [driver.find_element_by_xpath("//div[@class='phqjxq-0 fQNVmg']").text]
document['comment'] += ['\n\n'.join([i.text for i in driver.find_elements_by_xpath("//div[@class='sc-71lpws-1 hcbtbx-0 kxmuAN cCOVWi']")])]
pass
except:
document['date'] += [None]
document['author'] += [None]
document['content'] += [None]
document['comment'] += [None]
pass
pass
driver.close()
##
## Convert to table.
table = {
"data":pandas.DataFrame(document),
"location":os.path.join(folder, "{} {} {}.csv".format(platform, board, re.sub(" ", "-", time.ctime())))
}
table['data'].to_csv(table['location'], index=False, encoding="utf_8_sig")
pass
| 24.360825 | 147 | 0.639441 |
e8fb23e00c52d8542897df8937fa3b60bad7b2ad | 4,710 | py | Python | src/datatools/columns_rename.py | ingwersen-erik/dev-datatools | 907a8a3ec68e06b757918618c2c292deef9bf2a3 | [
"MIT"
] | null | null | null | src/datatools/columns_rename.py | ingwersen-erik/dev-datatools | 907a8a3ec68e06b757918618c2c292deef9bf2a3 | [
"MIT"
] | null | null | null | src/datatools/columns_rename.py | ingwersen-erik/dev-datatools | 907a8a3ec68e06b757918618c2c292deef9bf2a3 | [
"MIT"
] | null | null | null | #
# MIT License
#
# Copyright (c) 2021 Erik Ingwersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import logging
from typing import Optional
import pandas as pd
def rename_back(
df: pd.DataFrame,
attr_column_map: str | None = "column_map",
errors: str | None = "ignore",
) -> pd.DataFrame:
"""
Rename columns back to their original names. The function tries to do that
by relying on a potentially saved attribute that contains a dictionary
mapping the original column names to their new names.
Parameters
----------
df : pd.DataFrame
The dataframe to rename.
attr_column_map: str, optional
The attribute name that supposedly stores the column map.
By default, "column_map"
errors : str {'ignore', 'raise'}, optional
The error handling strategy to use when old column names are not
inside the dataframe attributes. By default, 'ignore' is used.
Returns
-------
pd.DataFrame
The dataframe with the columns renamed, when attribute
:param:`column_map` exists.
Raises
------
AttributeError
When attribute :param:`column_map` does not exist.
Examples
--------
>>> _df = pd.DataFrame(
... {
... 'order_material': ['A', 'B', 'C'],
... 'site': ['X', 'Y', 'Z'],
... pd.to_datetime('2021-10-10'): [0, 10, 0],
... }
... )
>>> _df.attrs['column_map'] = {'order_material': 'material'}
>>> rename_back(_df)
material site 2021-10-10 00:00:00
0 A X 0
1 B Y 10
2 C Z 0
"""
column_map = df.attrs.get(attr_column_map)
if column_map is None and errors == "raise":
raise AttributeError(
f"Tried to rename columns, but no {attr_column_map} attribute found"
)
if column_map:
logging.info("Renaming columns back to original names")
column_map = {v: k for k, v in column_map.items()}
df = df.rename(columns=column_map)
return df
def fmt_colnames(_df: pd.DataFrame) -> pd.DataFrame:
"""
Beautifies the column names of a given dataframe.
Formatting Options
------------------
* Convert column names to uppercase
* Replaces underscores with spaces "_" -> " "
* Converts any datetime columns to dates
Parameters
----------
_df : pd.DataFrame
The dataframe to rename the columns for.
Returns
-------
pd.DataFrame
The dataframe with renamed columns.
Examples
--------
>>> # noinspection PyShadowingNames
>>> _df = pd.DataFrame(
... {
... 'order_material': ['A', 'B', 'C'],
... 'site': ['X', 'Y', 'Z'],
... '2021/10/10': [0, 10, 0]
... }
... )
>>> fmt_colnames(_df)
ORDER MATERIAL SITE 2021-10-10
0 A X 0
1 B Y 10
2 C Z 0
"""
df_original_names = rename_back(_df)
if df_original_names is not None:
return df_original_names
return _df.rename(
columns={
original_column: pd.to_datetime(original_column, errors="ignore").strftime(
"%Y-%m-%d"
)
for original_column in _df.columns
if isinstance(
pd.to_datetime(original_column, errors="ignore"), pd.Timestamp
)
}
).rename(
columns={
original_column: str(original_column).upper().replace("_", " ")
for original_column in _df.columns
}
)
| 31.4 | 87 | 0.59448 |
c70c54385ad033648389d9d18f5d3407ce091306 | 344 | py | Python | urls.py | giovanniherdigein/my_first_django | ed547cf8802951a6af17c0683a642548e025935f | [
"Unlicense"
] | null | null | null | urls.py | giovanniherdigein/my_first_django | ed547cf8802951a6af17c0683a642548e025935f | [
"Unlicense"
] | null | null | null | urls.py | giovanniherdigein/my_first_django | ed547cf8802951a6af17c0683a642548e025935f | [
"Unlicense"
] | null | null | null | from django.urls import path
from . import views
app_name ='crudsite'
urlpatterns=[
path('',views.index,name='index'),
path('create_item',views.createItem,name= 'create_item'),
path('update_item/<int:item_id>/',views.updateItem,name='update_item'),
path('delete_item/<int:item_id>/',views.deleteItem,name='delete_item'),
] | 34.4 | 76 | 0.709302 |
71d0705f6135c102dc41859770354b804739b3ce | 2,029 | py | Python | venv/Lib/site-packages/pyrogram/raw/functions/account/get_content_settings.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/raw/functions/account/get_content_settings.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/functions/account/get_content_settings.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetContentSettings(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``126``
- ID: ``0x8b9b4dae``
**No parameters required.**
Returns:
:obj:`account.ContentSettings <pyrogram.raw.base.account.ContentSettings>`
"""
__slots__: List[str] = []
ID = 0x8b9b4dae
QUALNAME = "functions.account.GetContentSettings"
def __init__(self) -> None:
pass
@staticmethod
def read(data: BytesIO, *args: Any) -> "GetContentSettings":
# No flags
return GetContentSettings()
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
return data.getvalue()
| 30.283582 | 103 | 0.632824 |
01fe3f40b6829f7fd22099cd602aafa49135bd95 | 13,623 | py | Python | doc/make.py | raspbian-packages/pandas | fb33806b5286deb327b2e0fa96aedf25a6ed563f | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | doc/make.py | raspbian-packages/pandas | fb33806b5286deb327b2e0fa96aedf25a6ed563f | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | doc/make.py | raspbian-packages/pandas | fb33806b5286deb327b2e0fa96aedf25a6ed563f | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for pandas
installed. See the installation instructions for a list of these.
<del>Note: currently latex builds do not work because of table formats that are not
supported in the latex generation.</del>
2014-01-30: Latex has some issues but 'latex_forced' works ok for 0.13.0-400 or so
Usage
-----
python make.py clean
python make.py html
"""
from __future__ import print_function
import io
import glob # noqa
import os
import shutil
import sys
from contextlib import contextmanager
import sphinx # noqa
import argparse
import jinja2 # noqa
# Debian's debian/rules overrides it to point to correct built pandas
# os.environ['PYTHONPATH'] = '..'
SPHINX_BUILD = 'sphinxbuild'
def upload_dev(user='pandas'):
'push a copy to the pydata dev directory'
if os.system('cd build/html; rsync -avz . {0}@pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/dev/ -essh'.format(user)):
raise SystemExit('Upload to Pydata Dev failed')
def upload_dev_pdf(user='pandas'):
'push a copy to the pydata dev directory'
if os.system('cd build/latex; scp pandas.pdf {0}@pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/dev/'.format(user)):
raise SystemExit('PDF upload to Pydata Dev failed')
def upload_stable(user='pandas'):
'push a copy to the pydata stable directory'
if os.system('cd build/html; rsync -avz . {0}@pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/stable/ -essh'.format(user)):
raise SystemExit('Upload to stable failed')
def upload_stable_pdf(user='pandas'):
'push a copy to the pydata stable directory'
if os.system('cd build/latex; scp pandas.pdf {0}@pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/stable/'.format(user)):
raise SystemExit('PDF upload to stable failed')
def upload_prev(ver, doc_root='./', user='pandas'):
'push a copy of older release to appropriate version directory'
local_dir = doc_root + 'build/html'
remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver
cmd = 'cd %s; rsync -avz . %[email protected]:%s -essh'
cmd = cmd % (local_dir, user, remote_dir)
print(cmd)
if os.system(cmd):
raise SystemExit(
'Upload to %s from %s failed' % (remote_dir, local_dir))
local_dir = doc_root + 'build/latex'
pdf_cmd = 'cd %s; scp pandas.pdf %[email protected]:%s'
pdf_cmd = pdf_cmd % (local_dir, user, remote_dir)
if os.system(pdf_cmd):
raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root))
def build_pandas():
os.chdir('..')
os.system('python setup.py clean')
os.system('python setup.py build_ext --inplace')
os.chdir('doc')
def build_prev(ver):
if os.system('git checkout v%s' % ver) != 1:
os.chdir('..')
os.system('python setup.py clean')
os.system('python setup.py build_ext --inplace')
os.chdir('doc')
os.system('python make.py clean')
os.system('python make.py html')
os.system('python make.py latex')
os.system('git checkout master')
def clean():
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('source/generated'):
shutil.rmtree('source/generated')
@contextmanager
def cleanup_nb(nb):
try:
yield
finally:
try:
os.remove(nb + '.executed')
except OSError:
pass
def get_kernel():
"""Find the kernel name for your python version"""
return 'python%s' % sys.version_info.major
def execute_nb(src, dst, allow_errors=False, timeout=1000, kernel_name=''):
"""
Execute notebook in `src` and write the output to `dst`
Parameters
----------
src, dst: str
path to notebook
allow_errors: bool
timeout: int
kernel_name: str
defaults to value set in notebook metadata
Returns
-------
dst: str
"""
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
with io.open(src, encoding='utf-8') as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(allow_errors=allow_errors,
timeout=timeout,
kernel_name=kernel_name)
ep.preprocess(nb, resources={})
with io.open(dst, 'wt', encoding='utf-8') as f:
nbformat.write(nb, f)
return dst
def convert_nb(src, dst, to='html', template_file='basic'):
"""
Convert a notebook `src`.
Parameters
----------
src, dst: str
filepaths
to: {'rst', 'html'}
format to export to
template_file: str
name of template file to use. Default 'basic'
"""
from nbconvert import HTMLExporter, RSTExporter
dispatch = {'rst': RSTExporter, 'html': HTMLExporter}
exporter = dispatch[to.lower()](template_file=template_file)
(body, resources) = exporter.from_filename(src)
with io.open(dst, 'wt', encoding='utf-8') as f:
f.write(body)
return dst
def html():
check_build()
notebooks = [
'source/html-styling.ipynb',
]
for nb in notebooks:
with cleanup_nb(nb):
try:
print("Converting %s" % nb)
kernel_name = get_kernel()
executed = execute_nb(nb, nb + '.executed', allow_errors=True,
kernel_name=kernel_name)
convert_nb(executed, nb.rstrip('.ipynb') + '.html')
except (ImportError, IndexError) as e:
print(e)
print("Failed to convert %s" % nb)
if os.system('sphinx-build -P -b html -d build/doctrees '
'source build/html'):
raise SystemExit("Building HTML failed.")
try:
# remove stale file
os.system('rm source/html-styling.html')
os.system('cd build; rm -f html/pandas.zip;')
except:
pass
def zip_html():
try:
print("\nZipping up HTML docs...")
# just in case the wonky build box doesn't have zip
# don't fail this.
os.system('cd build; rm -f html/pandas.zip; zip html/pandas.zip -r -q html/* ')
print("\n")
except:
pass
def latex():
check_build()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Call the makefile produced by sphinx...
if os.system('make'):
print("Rendering LaTeX failed.")
print("You may still be able to get a usable PDF file by going into 'build/latex'")
print("and executing 'pdflatex pandas.tex' for the requisite number of passes.")
print("Or using the 'latex_forced' target")
raise SystemExit
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def latex_forced():
check_build()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Manually call pdflatex, 3 passes should ensure latex fixes up
# all the required cross-references and such.
os.system('pdflatex -interaction=nonstopmode pandas.tex')
os.system('pdflatex -interaction=nonstopmode pandas.tex')
os.system('pdflatex -interaction=nonstopmode pandas.tex')
raise SystemExit("You should check the file 'build/latex/pandas.pdf' for problems.")
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def check_build():
build_dirs = [
'build', 'build/doctrees', 'build/html',
'build/latex', 'build/plots', 'build/_static',
'build/_templates']
for d in build_dirs:
try:
os.mkdir(d)
except OSError:
pass
def all():
# clean()
html()
def auto_dev_build(debug=False):
msg = ''
try:
step = 'clean'
clean()
step = 'html'
html()
step = 'upload dev'
upload_dev()
if not debug:
sendmail(step)
step = 'latex'
latex()
step = 'upload pdf'
upload_dev_pdf()
if not debug:
sendmail(step)
except (Exception, SystemExit) as inst:
msg = str(inst) + '\n'
sendmail(step, '[ERROR] ' + msg)
def sendmail(step=None, err_msg=None):
from_name, to_name = _get_config()
if step is None:
step = ''
if err_msg is None or '[ERROR]' not in err_msg:
msgstr = 'Daily docs %s completed successfully' % step
subject = "DOC: %s successful" % step
else:
msgstr = err_msg
subject = "DOC: %s failed" % step
import smtplib
from email.MIMEText import MIMEText
msg = MIMEText(msgstr)
msg['Subject'] = subject
msg['From'] = from_name
msg['To'] = to_name
server_str, port, login, pwd = _get_credentials()
server = smtplib.SMTP(server_str, port)
server.ehlo()
server.starttls()
server.ehlo()
server.login(login, pwd)
try:
server.sendmail(from_name, to_name, msg.as_string())
finally:
server.close()
def _get_dir(subdir=None):
import getpass
USERNAME = getpass.getuser()
if sys.platform == 'darwin':
HOME = '/Users/%s' % USERNAME
else:
HOME = '/home/%s' % USERNAME
if subdir is None:
subdir = '/code/scripts/config'
conf_dir = '%s/%s' % (HOME, subdir)
return conf_dir
def _get_credentials():
tmp_dir = _get_dir()
cred = '%s/credentials' % tmp_dir
with open(cred, 'r') as fh:
server, port, un, domain = fh.read().split(',')
port = int(port)
login = un + '@' + domain + '.com'
import base64
with open('%s/cron_email_pwd' % tmp_dir, 'r') as fh:
pwd = base64.b64decode(fh.read())
return server, port, login, pwd
def _get_config():
tmp_dir = _get_dir()
with open('%s/addresses' % tmp_dir, 'r') as fh:
from_name, to_name = fh.read().split(',')
return from_name, to_name
funcd = {
'html': html,
'zip_html': zip_html,
'upload_dev': upload_dev,
'upload_stable': upload_stable,
'upload_dev_pdf': upload_dev_pdf,
'upload_stable_pdf': upload_stable_pdf,
'latex': latex,
'latex_forced': latex_forced,
'clean': clean,
'auto_dev': auto_dev_build,
'auto_debug': lambda: auto_dev_build(True),
'build_pandas': build_pandas,
'all': all,
}
small_docs = False
# current_dir = os.getcwd()
# os.chdir(os.path.dirname(os.path.join(current_dir, __file__)))
import argparse
argparser = argparse.ArgumentParser(description="""
pandas documentation builder
""".strip())
# argparser.add_argument('-arg_name', '--arg_name',
# metavar='label for arg help',
# type=str|etc,
# nargs='N|*|?|+|argparse.REMAINDER',
# required=False,
# #choices='abc',
# help='help string',
# action='store|store_true')
# args = argparser.parse_args()
#print args.accumulate(args.integers)
def generate_index(api=True, single=False, **kwds):
from jinja2 import Template
with open("source/index.rst.template") as f:
t = Template(f.read())
with open("source/index.rst","w") as f:
f.write(t.render(api=api,single=single,**kwds))
import argparse
argparser = argparse.ArgumentParser(description="pandas documentation builder",
epilog="Targets : %s" % funcd.keys())
argparser.add_argument('--no-api',
default=False,
help='Omit api and autosummary',
action='store_true')
argparser.add_argument('--single',
metavar='FILENAME',
type=str,
default=False,
help='filename of section to compile, e.g. "indexing"')
argparser.add_argument('--user',
type=str,
default=False,
help='Username to connect to the pydata server')
def main():
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
if args.single:
args.single = os.path.basename(args.single).split(".rst")[0]
if 'clean' in unknown:
args.single=False
generate_index(api=not args.no_api and not args.single, single=args.single)
if len(sys.argv) > 2:
ftype = sys.argv[1]
ver = sys.argv[2]
if ftype == 'build_previous':
build_prev(ver, user=args.user)
if ftype == 'upload_previous':
upload_prev(ver, user=args.user)
elif len(sys.argv) == 2:
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
raise SystemExit('Do not know how to handle %s; valid args are %s' % (
arg, list(funcd.keys())))
if args.user:
func(user=args.user)
else:
func()
else:
small_docs = False
all()
# os.chdir(current_dir)
if __name__ == '__main__':
import sys
sys.exit(main())
| 28.5 | 95 | 0.593335 |
e1071060a680e49b4bfc307f8c45f59ed083707d | 4,596 | py | Python | the_index/objects/index.py | lastmeta/index_credit | fd85111341d996d678c1a5ac94832904288e3e48 | [
"CC0-1.0"
] | null | null | null | the_index/objects/index.py | lastmeta/index_credit | fd85111341d996d678c1a5ac94832904288e3e48 | [
"CC0-1.0"
] | null | null | null | the_index/objects/index.py | lastmeta/index_credit | fd85111341d996d678c1a5ac94832904288e3e48 | [
"CC0-1.0"
] | null | null | null | class User():
def __init__(self, credit: float, vote: str, against: int):
self.credit = credit
self.vote = vote
self.against = against
self.trades = {}
def __repr__(self):
return (
f'\n credit {self.credit}'
f'\n vote {self.vote}'
f'\n against {self.against}'
f'\n trades {self.trades}')
def trade(self, asset, amount):
self.trades[asset] = amount
def complete_trades(self, credit):
self.trades = {}
self.credit += credit
class Index():
def __init__(self, users: list, assets: dict, rates: dict):
self.users = users
self.assets = assets
self.rates = rates
self.mana = {}
self.weight = {}
def __repr__(self):
return (
f'\nusers {self.users}'
f'\nassets {self.assets}'
f'\nrates {self.rates}'
f'\nmana {self.mana}'
f'\nweight {self.weight}'
f'\nvalue {self.value()}')
def value(self):
return {k: v * self.rates[k] for k, v in self.assets.items()}
def clear_mana(self):
self.mana = {k: 0 for k, v in self.mana.items()}
def generate_ideal_allocation(self, mana_total):
values = self.value()
values_total = sum([v for v in values.values()])
self.weight = {
# I thought you had to weight it according to how large the asset is, but I guess not...
# k: (self.mana[k] / mana_total) * (v / values_total)
k: (self.mana[k] / mana_total)
for k, v in values.items()}
return {k: v + (v * self.weight[k]) for k, v in values.items()}
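# Worked example of the formula above (hypothetical numbers, not taken from run() below):
# if one asset is worth 1.0 and it received all of the mana, weight == 1.0 and the
# ideal value doubles to 2.0; with half of the mana, weight == 0.5 and it becomes 1.5.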
def apply_trades(self):
''' assumes only valid trades exist '''
for user in self.users:
credit_for_user = 0
print(self.assets, user.credit)
for k, amount in user.trades.items():
self.assets[k] += amount
credit_for_user += (amount * self.rates[k])
user.complete_trades(credit_for_user)
print(self.assets, user.credit)
def negotiate_allocations(self, ideal, trade, mana_total):
print('self.weight', self.weight)
for k, value in trade.items():
print(
f'\n{k}: {value} + abs({ideal[k]} - {value}) * {self.weight[k]}',
f'\n{k}: {value} + {abs(ideal[k] - value)} * {self.weight[k]}',
f'\n{k}: {value} + {abs(ideal[k] - value) * self.weight[k]}'
f'\n{k}: {value + abs(ideal[k] - value) * self.weight[k]}')
return {
k: value + abs(ideal[k] - value) * self.weight[k]
for k, value in trade.items()}
def rate_translation(self, negotiation):
'''
if the negotiation was our value and our asset counts is what it is,
what would the rates have to be?
'''
for k, v in negotiation.items():
print(k, 'rate:', self.rates[k], 'v:', v, 'count:', self.assets[k], '()', v / self.assets[k])
self.rates[k] = v / self.assets[k]
return None
def round(self):
'''
1. take allocation of value from last round
2. generate mana, tally up what it was spent on
(in demo you can only vote for one asset: all mana is excess mana)
3. generate ideal allocation
4. tally up and apply trades
5. calculate achieved allocation via trading
6. modify achieved allocation according to mana spent
7. translate allocation into an effect on rates and apply
'''
self.clear_mana()
mana_total = 0
for user in self.users:
self.mana[user.vote] = user.credit * user.against
mana_total += user.credit
ideal = self.generate_ideal_allocation(mana_total)
print('ideal', ideal)
self.apply_trades()
trade = self.value()
print('trade', trade)
negotiation = self.negotiate_allocations(ideal, trade, mana_total)
print('negotiation', negotiation)
self.rate_translation(negotiation)
def run():
users = [
User(credit=1, vote='btc', against=1),
User(credit=2, vote='eth', against=1),
User(credit=3, vote='xmr', against=-1)]
assets = {'btc': 1, 'eth': 2, 'xmr': 3}
rates = {'btc': 1, 'eth': 2, 'xmr': 3}
index = Index(users, assets, rates)
index.round()
users[0].trade('btc', 1)
index.round()
users[2].trade('xmr', -3)
index.round()
users[1].trade('eth', -1)
| 35.90625 | 105 | 0.544604 |
83fc082b545106d02622de20f2083e8a7562f96c | 25,777 | py | Python | venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/chardet/jisfreq.py | xiegudong45/typeidea | db6504a232d120d6ffa185730bd35b9b9ecffa6c | [
"Apache-2.0"
] | 38,667 | 2015-01-01T00:15:34.000Z | 2022-03-31T22:57:03.000Z | env/Lib/site-packages/pip/_vendor/chardet/jisfreq.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 8,417 | 2015-01-01T13:03:16.000Z | 2022-03-31T17:40:27.000Z | lib/python2.7/site-packages/pip/_vendor/chardet/jisfreq.py | anish03/weather-dash | d517fa9da9028d1fc5d8fd71d77cee829ddee87b | [
"MIT"
] | 11,269 | 2015-01-01T08:41:17.000Z | 2022-03-31T16:12:52.000Z | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
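# Arithmetic check of the ratios above (comment only, added for clarity):
# 0.92635 / (1 - 0.92635) ~= 12.58, and 25% of 12.58 ~= 3.14, which is close to
# the 3.0 chosen for JIS_TYPICAL_DISTRIBUTION_RATIO.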
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368
JIS_CHAR_TO_FREQ_ORDER = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
)
| 79.070552 | 98 | 0.722155 |
e851f983570ccecaa86411e210398fa509c5ee74 | 5,199 | py | Python | enas/controller.py | dnddnjs/pytorch-vision | d432b467774f838bef37372d6cff3576c6559803 | [
"MIT"
] | 48 | 2018-10-14T12:13:54.000Z | 2021-12-12T17:48:35.000Z | enas/controller.py | dnddnjs/pytorch-vision | d432b467774f838bef37372d6cff3576c6559803 | [
"MIT"
] | 6 | 2018-10-11T01:29:39.000Z | 2019-05-29T23:44:49.000Z | enas/controller.py | dnddnjs/pytorch-vision | d432b467774f838bef37372d6cff3576c6559803 | [
"MIT"
] | 29 | 2018-11-14T14:01:16.000Z | 2021-12-07T00:17:41.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class Controller(nn.Module):
def __init__(self):
super(Controller, self).__init__()
# constants
self.num_nodes = 7
self.lstm_size = 64
self.tanh_constant = 1.10
self.op_tanh_reduce = 2.5
self.additional_bias = torch.Tensor([0.25, 0.25, -0.25, -0.25, -0.25]).to(device)
# layers
self.embed_first = nn.Embedding(num_embeddings=1, embedding_dim=self.lstm_size)
self.embed_ops = nn.Embedding(num_embeddings=5, embedding_dim=self.lstm_size)
self.lstm = nn.LSTMCell(input_size=self.lstm_size, hidden_size=self.lstm_size, bias=False)
self.init_hidden(batch_size=1)
# fully-connected layers for index of previous cell outputs
self.fc_index_prev = nn.Linear(in_features=self.lstm_size, out_features=self.lstm_size, bias=False)
self.fc_index_curr = nn.Linear(in_features=self.lstm_size, out_features=self.lstm_size, bias=False)
self.fc_index_out = nn.Linear(in_features=self.lstm_size, out_features=1, bias=False)
# fully-connected layer for 5 operations
self.fc_ops = nn.Linear(in_features=self.lstm_size, out_features=5)
# init parameters
self.init_parameters()
def init_parameters(self):
torch.nn.init.xavier_uniform_(self.embed_first.weight)
torch.nn.init.xavier_uniform_(self.embed_ops.weight)
torch.nn.init.xavier_uniform_(self.lstm.weight_hh)
torch.nn.init.xavier_uniform_(self.lstm.weight_ih)
self.fc_ops.bias.data = torch.Tensor([10, 10, 0, 0, 0])
def init_hidden(self, batch_size):
self.hx = torch.zeros(batch_size, self.lstm_size).to(device)
self.cx = torch.zeros(batch_size, self.lstm_size).to(device)
# prev_lstm_outputs is a placeholder for saving previous cell's lstm output
# The linear transformation of lstm output is saved at prev_fc_outputs.
def sample_cell(self, arc_seq, entropy_list, log_prob_list, use_additional_bias):
inputs = torch.zeros(1).long().to(device)
inputs = self.embed_first(inputs)
# lstm should have a dynamic size of output for indices of previous layer.
# so save previous lstm outputs and fc outputs as a list
prev_lstm_outputs, prev_fc_outputs = list(), list()
for node_id in range(2):
hidden = (self.hx, self.cx)
self.hx, self.cx = self.lstm(inputs, hidden)
prev_lstm_outputs.append(torch.zeros_like(self.hx))
prev_fc_outputs.append(self.fc_index_prev(self.hx.clone()))
for node_id in range(2, self.num_nodes):
# sample 2 indices to select input of the node
for i in range(2):
hidden = (self.hx, self.cx)
self.hx, self.cx = self.lstm(inputs, hidden)
# todo: need to be fixed
logits = self.fc_index_curr(self.hx)
query = torch.cat(prev_fc_outputs)
query = torch.tanh(query + logits)
query = self.fc_index_out(query)
logits = query.view(query.size(-1), -1)
logits = self.tanh_constant * torch.tanh(logits)
probs = F.softmax(logits, dim=-1)
log_prob = F.log_softmax(logits, dim=-1)
action = torch.multinomial(probs, 1)[0]
arc_seq.append(action)
selected_log_prob = log_prob[:, action.long()]
entropy = -(log_prob * probs).sum(1, keepdim=False)
entropy_list.append(entropy)
log_prob_list.append(selected_log_prob)
# next input for lstm is the output of selected previous node index
inputs = prev_lstm_outputs[action]
# sample 2 operations for computation
for i in range(2):
hidden = (self.hx, self.cx)
self.hx, self.cx = self.lstm(inputs, hidden)
logits = self.fc_ops(self.hx)
logits = (self.tanh_constant / self.op_tanh_reduce) * torch.tanh(logits)
if use_additional_bias:
logits += self.additional_bias
probs = F.softmax(logits, dim=-1)
log_prob = F.log_softmax(logits, dim=-1)
action = torch.multinomial(probs, 1)[0]
arc_seq.append(action)
selected_log_prob = log_prob[:, action.long()]
entropy = -(log_prob * probs).sum(1, keepdim=False)
entropy_list.append(entropy)
log_prob_list.append(selected_log_prob)
inputs = self.embed_ops(action)
hidden = (self.hx, self.cx)
self.hx, self.cx = self.lstm(inputs, hidden)
prev_lstm_outputs.append(self.hx.clone())
prev_fc_outputs.append(self.fc_index_prev(self.hx.clone()))
inputs = torch.zeros(1).long().to(device)
inputs = self.embed_first(inputs)
return arc_seq, entropy_list, log_prob_list
# sample child model specifications
# this is micro controller so sample architecture for 2 cells(normal, reduction)
def sample_child(self):
# for each node, there is 4 indices for constructing architecture of the node.
# 2 previous node indices and 2 operation indices
normal_arc, reduction_arc = [], []
# entropy and log prob is for the training of controller
entropy_list, log_prob_list = [], []
# sample normal architecture
outputs = self.sample_cell(normal_arc, entropy_list, log_prob_list, True)
normal_arc, entropy_list, log_prob_list = outputs
# sample reduction architecture
outputs = self.sample_cell(reduction_arc, entropy_list, log_prob_list, True)
reduction_arc, entropy_list, log_prob_list = outputs
return normal_arc, reduction_arc, entropy_list, log_prob_list
| 37.402878 | 101 | 0.734757 |
792410a8878c6963d6cd8112b58e1a46e3656b36 | 391 | py | Python | Hotails/asgi.py | ErezCohenn/Beyond-07-team-1 | 37eed5bf1b0902b21f7c824acfd25634c40270db | [
"MIT"
] | 1 | 2022-03-03T12:03:17.000Z | 2022-03-03T12:03:17.000Z | Hotails/asgi.py | ErezCohenn/Beyond-07-team-1 | 37eed5bf1b0902b21f7c824acfd25634c40270db | [
"MIT"
] | 38 | 2022-03-07T14:14:48.000Z | 2022-03-31T18:37:52.000Z | Hotails/asgi.py | ErezCohenn/Beyond-07-team-1 | 37eed5bf1b0902b21f7c824acfd25634c40270db | [
"MIT"
] | 5 | 2022-02-28T18:55:09.000Z | 2022-03-06T08:04:40.000Z | """
ASGI config for Hotails project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Hotails.settings')
application = get_asgi_application()
| 23 | 78 | 0.785166 |
ed213be000f725f5747faae6289523d918b77a47 | 1,995 | py | Python | foo/lc/leetcode/editor/cn/[144]Binary Tree Preorder Traversal.py | JaeCoding/keepPying | 5bf07b34a6c63e9a6bd2b39c17149adb2dc59570 | [
"MIT"
] | 1 | 2020-02-24T15:15:55.000Z | 2020-02-24T15:15:55.000Z | foo/lc/leetcode/editor/cn/[144]Binary Tree Preorder Traversal.py | JaeCoding/keepPying | 5bf07b34a6c63e9a6bd2b39c17149adb2dc59570 | [
"MIT"
] | null | null | null | foo/lc/leetcode/editor/cn/[144]Binary Tree Preorder Traversal.py | JaeCoding/keepPying | 5bf07b34a6c63e9a6bd2b39c17149adb2dc59570 | [
"MIT"
] | null | null | null | # Given the root of a binary tree, return the preorder traversal of its nodes' v
# alues.
#
#
# Example 1:
#
#
# Input: root = [1,null,2,3]
# Output: [1,2,3]
#
#
# Example 2:
#
#
# Input: root = []
# Output: []
#
#
# Example 3:
#
#
# Input: root = [1]
# Output: [1]
#
#
# Example 4:
#
#
# Input: root = [1,2]
# Output: [1,2]
#
#
# Example 5:
#
#
# Input: root = [1,null,2]
# Output: [1,2]
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [0, 100].
# -100 <= Node.val <= 100
#
#
#
#
# Follow up:
#
# Recursive solution is trivial, could you do it iteratively?
#
#
# Related Topics: Stack, Tree
# 👍 426 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import List
from foo.lc.leetcode.editor.TreeNode import TreeNode
from foo.lc.leetcode.editor.TreeUtil import TreeUtil
class Solution:
def preorderTraversal(self, root: TreeNode) -> List[int]:
result = []
def pre(node: TreeNode):
if not node:
return
result.append(node.val)
pre(node.left)
pre(node.right)
pre(root)
return result
def preorderTraversal2(self, root: TreeNode) -> List[int]:
result = []
stack = []
if root:
stack.append(root)
while stack:
node = stack.pop()
result.append(node.val)
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
return result
# root = TreeUtil.creat_tree([1,2,3,4,5,6,7])
root = TreeUtil.creat_tree([])
a = Solution().preorderTraversal2(root)
print(a)
# leetcode submit region end(Prohibit modification and deletion)
| 19 | 80 | 0.557895 |
5419578dbc621b84a223b528e23753aaa12b74cb | 5,263 | py | Python | ConvNN.py | pierremtb/modded-MNIST-digit-classification | 18f55fb00ca6a436712707ab1a21eafaf384553d | [
"MIT"
] | null | null | null | ConvNN.py | pierremtb/modded-MNIST-digit-classification | 18f55fb00ca6a436712707ab1a21eafaf384553d | [
"MIT"
] | null | null | null | ConvNN.py | pierremtb/modded-MNIST-digit-classification | 18f55fb00ca6a436712707ab1a21eafaf384553d | [
"MIT"
] | null | null | null | # Michael Segev
# Pierre Jacquier
# Albert Faucher
# Group 70
# COMP 551 MP3
# March 18 2019
import torch
import torch.nn as nn
import torch.optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
import math
from helpers import *
class ConvNN(torch.nn.Module):
def __init__(self):
super(ConvNN, self).__init__() # call the inherited class constructor
print("Model: ConvNN")
# define the architecture of the neural network
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5), # output is 60x60
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.MaxPool2d(2, 2) # output is 30x30
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5), # output is 26x26
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.MaxPool2d(2, 2) # output is 13x13
)
self.linear1 = nn.Sequential(
torch.nn.Linear(64*13*13, 1000),
nn.ReLU(True)
)
self.linear2 = nn.Sequential(
torch.nn.Linear(1000, 200),
nn.ReLU(True)
)
self.linear3 = torch.nn.Linear(200, 10)
self.losses = []
self.accuracies = []
self.val_accuracies = []
self.loss_LPF = 2.3
self.criterion = None
self.optimizer = None
def init_optimizer(self):
# loss function
# self.criterion = torch.nn.MSELoss(reduction='sum')
self.criterion = torch.nn.CrossEntropyLoss()
# optimizer
lr = 1e-2
print("Learning rate: {}".format(lr))
# self.optimizer = torch.optim.Adam(self.parameters(), lr=lr)
self.optimizer = torch.optim.SGD(self.parameters(), lr=lr, momentum=0.9)
def forward(self, x):
h = self.conv1(x)
h = self.conv2(h)
h = h.reshape(h.size(0), -1)
h = self.linear1(h)
h = self.linear2(h)
y_pred = self.linear3(h)
return y_pred
def train_batch(self, x, y):
# Forward pass: Compute predicted y by passing x to the model
y_pred = self(x)
# Compute and print loss
loss = self.criterion(y_pred, y)
self.losses.append(float(loss.data.item()))
# Record accuracy
total = y.size(0)
_, predicted = torch.max(y_pred.data, 1)
correct = (predicted == y).sum().item()
acc = correct / total
self.accuracies.append(acc)
# Reset gradients to zero, perform a backward pass, and update the weights.
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss, acc
def train_all_batches(self, x, y, batch_size, num_epochs, loss_target, device, x_val=[], y_val=[], val_skip=0):
# figure out how many batches we can make
num_batches = int(y.shape[0] / batch_size)
last_batch_size = batch_size
print("Number of batches = {}".format(num_batches))
if y.shape[0] % batch_size != 0:
num_batches += 1
last_batch_size = y.shape[0] % batch_size
for epoch in range(num_epochs):
if self.loss_LPF < loss_target:
print("reached loss target, ending early!")
break
for batch_num in range(num_batches):
# slice tensors according into requested batch
if batch_num == num_batches - 1:
# last batch logic!
# print("Last batch!")
current_batch_size = last_batch_size
else:
current_batch_size = batch_size
                x_batch = torch.tensor(
                    x[batch_num * batch_size:batch_num * batch_size + current_batch_size],
                    dtype=torch.float32, requires_grad=True, device=device)
                y_batch = torch.tensor(
                    y[batch_num * batch_size:batch_num * batch_size + current_batch_size],
                    dtype=torch.long, requires_grad=False, device=device)
loss, acc = self.train_batch(x_batch, y_batch)
self.loss_LPF = 0.01 * float(loss.data.item()) + 0.99*self.loss_LPF
val_acc = 0
if batch_num % ((val_skip + 1) * 40) == 0 and len(x_val) == len(y_val) and len(x_val) > 0:
val_acc = validate_data(self, x_val, y_val, device)
self.val_accuracies.append(val_acc)
if batch_num % 40 == 0:
toPrint = "Epoch: {}, Loss: {}, Acc: {}%".format(epoch, self.loss_LPF, round(acc * 100, 3))
if (val_acc > 0):
toPrint += ", ValAcc: {}%".format(round(val_acc * 100, 3))
print(toPrint)
def plot_loss(self):
plt.title('Loss over time')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.plot(self.losses)
plt.show()
def plot_acc(self):
plt.title('Accuracy over time')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.plot(self.accuracies)
plt.plot(self.val_accuracies)
plt.show()
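# --- Editor's sketch (not part of the original repository) ---
# Quick smoke test on random data. The 64x64 input size is implied by the layer
# arithmetic above (64 -> conv5 -> 60 -> pool -> 30 -> conv5 -> 26 -> pool -> 13,
# matching the 64*13*13 input of linear1); batch size and labels are arbitrary.
if __name__ == '__main__':
    dev = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = ConvNN().to(dev)
    model.init_optimizer()
    x = torch.randn(8, 1, 64, 64, device=dev)
    y = torch.randint(0, 10, (8,), device=dev)
    loss, acc = model.train_batch(x, y)
    print(float(loss.data.item()), acc)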
| 34.398693 | 115 | 0.559947 |
065e304b12cabb5b6c6f2e24116af47fd95a0efd | 10,066 | py | Python | src/files.py | agtoever/twixtbot-ui | 366d7bef33fdbaa260ea8b3330fa9ab29ad05f03 | [
"MIT"
] | null | null | null | src/files.py | agtoever/twixtbot-ui | 366d7bef33fdbaa260ea8b3330fa9ab29ad05f03 | [
"MIT"
] | 2 | 2021-11-10T20:13:45.000Z | 2022-01-12T07:38:04.000Z | src/files.py | agtoever/twixtbot-ui | 366d7bef33fdbaa260ea8b3330fa9ab29ad05f03 | [
"MIT"
] | null | null | null | import PySimpleGUI as sg
from backend import twixt
import layout as lt
import string
def str2twixt(move):
""" Converts one move string to a twixt backend class move.
    Handles both T1-style coordinates (e.g.: 'd5', 'f18') as well as tsgf-
style coordinates (e.g.: 'fg', 'bi') as well as special strings
('swap' and 'resign'). It can handle letter in upper as well as lowercase.
Args:
move: string with a move
Returns:
twixt.SWAP or twixt.RESIGN or twixt.Point
Raises
ValueError if the move_str can't be parsed in any valid format
Examples:
>>> str2twixt('b3')
b3
>>> str2twixt('i18')
i18
>>> str2twixt('fj')
f10
>>> str2twixt('swap')
'swap'
>>> str2twixt('resign')
'resign'
>>> str2twixt('123')
ValueError: Can't parse move: '123'
>>> str2twixt('invalid')
ValueError: Can't parse move: 'invalid'
"""
# Handle swap and resign
if move.lower() == twixt.SWAP.lower():
return twixt.SWAP
elif move.lower() == twixt.RESIGN.lower():
return twixt.RESIGN
# Handle T1-style moves
elif move[0] in string.ascii_letters and move[-1] in string.digits:
return twixt.Point(move)
# Handle tsgf-stype moves
elif len(move) == 2 and all(c in string.ascii_letters for c in move):
return twixt.Point(move[0] + str(ord(move[1].lower()) - ord('a') + 1))
# Can't handle move. Throw exception
raise ValueError(f"Can't parse move: '{move}'")
def parse_t1_file(content):
"""Returns (players, moves) from a list of strings from a T1 file
Args:
content: list of strings: content from a T1 file
Returns:
tuple: (list: players as strings, list: twixt moves)
Raises:
ValueError: if players or moves data can't be interpreted
Examples:
>>> content = [
'# File created by T1j',
'# T1j is a program to play TwixT ([email protected])',
'1 # version of file-format',
'Player# Name of Player 1',
'Computer# Name of Player 2',
'24# y-size of board',
'24# x-size of board',
'H# player 1 human or computer',
'C# player 2 human or computer',
'1# starting player (1 plays top-down)',
'V# Direction of letters',
'N# pierule?',
'N# game already over?',
'L10', 'L17', 'Q15', 'Q8', 'S12', 'P11', 'O14', 'P19', 'V18',
'U15', 'V16', 'T17', 'U14', 'V17', 'W16', 'W15', 'F16', 'L19',
'F20', 'I14', 'F12', 'X13', 'G14', 'G8', 'I9', 'J9', 'J7',
'E9', 'G10', 'N18', 'J3', 'G20', 'G18', 'E21']
>>> parse_t1_file(content)
(['Player', 'Computer'],
[l10, l17, q15, q8, s12, p11, o14, p19, v18, u15, v16, t17,
u14, v17, w16, w15, f16, l19, f20, i14, f12, x13, g14, g8, i9,
j9, j7, e9, g10, n18, j3, g20, g18, e21])
"""
MOVES_STARTLINE = 13
PLAYER_LINES = [3, 4]
COMMENT_CHAR = '#'
try:
players = [content[linenr].split(COMMENT_CHAR)[0]
for linenr in PLAYER_LINES]
except Exception:
raise ValueError("Can't read player names from T1 file")
try:
moves = [str2twixt(move) for move in content[MOVES_STARTLINE:]
if len(move) > 0]
except Exception:
# Just pass on the exception from str2twixt
raise
return players, moves
def parse_tsgf_file(content):
"""Returns (players, moves) from a list of strings from a tsgf file
Args:
content: list of strings: content from a tsgf file
Returns:
tuple: (list: players as strings, list: twixt moves)
Raises:
ValueError: if players or moves data can't be interpreted
Examples:
>>> content = [
('(;FF[4]EV[twixt.ld.DEFAULT]PB[agtoever]PW[Jan Krabbenbos]SZ[24]'
'SO[https://www.littlegolem.net];b[pl];r[ps];b[pr];r[rt];b[ot];'
'r[po];b[pn];r[qq];b[op];r[pg];b[nh];r[oj];b[oi];r[qi];b[nk];'
'r[nf];b[mf])')]
>>> parse_tsgf_file(content)
(['agtoever', 'Jan Krabbenbos'], [p12, p19, p18, r20, o20, p15,
p14, q17, o16, p7, n8, o10, o9,
q9, n11, n6, m6])
"""
PLAYERS_STR = ('PB', 'PW')
TURN_STR = ('r[', 'b[')
FIELD_SEP = ';'
if len(content) > 1:
raise ValueError('Found more than 1 line in a tsgf file.')
try:
player_idx = [content[0].find(key) for key in PLAYERS_STR]
players = [content[0][idx + 3:content[0].find(']', idx)]
for idx in player_idx]
except Exception:
raise ValueError("Can't read player names from tsgf file")
try:
raw_moves = [field[2:field.find('|')
if '|' in field else field.find(']')]
for field in content[0].split(FIELD_SEP)
if field[:2] in TURN_STR]
moves = list(map(str2twixt, raw_moves))
except Exception:
# Just pass on the exception from str2twixt
raise
return players, moves
def get_game(curent_cross_lines_setting=False):
"""Returns (players, moves) from a file, chosen by the user
Shows a file-open dialog to the user.
The chosen file is read and parsed into players and moves.
If the file is a tsgf file, the user is asked if the setting
to allow crossing lines should be enabled, because Little Golem
    plays with crossing lines allowed by default.
The resulting player name list and moves list is returned.
Finally a boolean is returned, which indicates if crossing lines
should be set to enabled (True) or if it should be left
in the current state (False).
Exceptions that occur while opening and/or parsing the file
are handled within this function.
Args:
curent_cross_lines_setting (bool): current setting for crossing lines
Returns:
tuple: (list: players as strings,
list: twixt moves,
bool: enable_crossing_lines)
"""
RETURN_ON_FAILURE = None, None, False
# Get filename
file_name = sg.PopupGetFile('Choose file', file_types=(
("All Files", "*.*"),
("T1j Files", "*.T1"),
("Little Golem Files", "*.tsgf")), no_window=True, keep_on_top=True)
if file_name is None or file_name == "":
return RETURN_ON_FAILURE
# Open file
try:
with open(file_name, "tr") as f:
content = list(map(lambda s: s.strip(), f.readlines()))
except Exception:
sg.popup_ok(f"Can't open {file_name} as a valid Twixt file.")
return RETURN_ON_FAILURE
# Parse file
try:
if file_name[-2:].upper() == 'T1':
players, moves = parse_t1_file(content)
return players, moves, False
elif file_name[-4:].lower() == 'tsgf':
enable_crossing_lines = False
if not curent_cross_lines_setting:
enable_crossing_lines = sg.popup_yes_no(
"You have opened a .tsgf file, which propably comes "
"from LittleGolem. By default, LittleGolem allows "
"crossing lines. You don't have crossing lines enabled. "
"Do you want to enable crossing lines?",
title='Enable crossing lines?') == "Yes"
players, moves = parse_tsgf_file(content)
return players, moves, enable_crossing_lines
else:
lt.popup("Didn't recognize the filename extension.")
except Exception as e:
sg.popup_ok(f"Error '{e}' while opening file {file_name}")
return RETURN_ON_FAILURE
def save_game(players=['Player1', 'Player2'],
moves=[''],
board_size=24,
game_over=False):
""" Saves a Twixt game to T1 file, chosen by the user
Shows a file-save dialog to the user.
The twixt game given by the function parameters are saved to the file.
Only .T1 file format is currently supported.
Exceptions that occur while saving the file are handled within
this function.
Args:
players: list of two strings with player names
moves: list of twixt moves
board_size: int with board size (defaults to 24)
game_over: boolean, true if the game is over (defaults to False)
Returns:
None
"""
# Get filename
file_name = sg.PopupGetFile('Choose file', file_types=(
("T1j Files", "*.T1"),),
no_window=True, save_as=True, keep_on_top=True)
if file_name is None or file_name == "":
return
# Build file contents
try:
content = [
'# File created by twixtbot-ui',
('# twixtbot-ui is a program to play TwixtT '
'(https://github.com/stevens68/twixtbot-ui)'),
'1 # version of file-format',
str(players[0]) + ' # Name of player 1',
str(players[1]) + ' # Name of player 2',
str(board_size) + ' # y-size of board',
str(board_size) + ' # x-size of board',
'H # player 1 human or computer',
'H # player 2 human or computer',
'1 # starting player (1 plays top-down)',
'V # direction of letters',
'Y # pierule?',
('Y' if game_over else 'N') + ' # game already over?'
]
content += [str(m).upper() for m in moves]
except Exception as e:
sg.popup_ok('Could not create file contents. Game is NOT saved!\n'
f'Python error: {e}')
return
# Write file
try:
with open(file_name, "tw") as f:
f.write('\n'.join(content))
except Exception:
sg.popup_ok(f"Can't write {file_name}. Game is NOT saved!")
return
sg.popup_ok(f'Game saved successfully as {file_name}')
return
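# --- Editor's note (not part of the original module) ---
# A typical round trip, assuming a display is available since get_game() and
# save_game() open PySimpleGUI dialogs; board_size=24 and game_over=False are
# illustrative values.
if __name__ == '__main__':
    players, moves, enable_crossing = get_game(curent_cross_lines_setting=False)
    if players is not None and moves is not None:
        save_game(players=players, moves=moves, board_size=24, game_over=False)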
| 33.892256 | 78 | 0.566859 |
358435ab6938a9d40de3109dec9caa90f34c2864 | 1,034 | py | Python | jdcloud_sdk/services/disk/client/DiskClient.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/disk/client/DiskClient.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/disk/client/DiskClient.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudclient import JDCloudClient
from jdcloud_sdk.core.config import Config
class DiskClient(JDCloudClient):
def __init__(self, credential, config=None, logger=None):
if config is None:
config = Config('disk.jdcloud-api.com')
super(DiskClient, self).__init__(credential, config, 'disk', '0.12.7', logger)
| 34.466667 | 86 | 0.744681 |
e99aa428e4ebfd014ec1d6c28b062b86d539e43b | 3,851 | py | Python | Vision/AnalyzeFormV2/AnalyzeForm/__init__.py | iii-PaulCridland/azure-search-power-skills | bbc5848c32b3bd6f2c8942693d854563e0cee708 | [
"MIT"
] | 128 | 2019-06-12T19:24:34.000Z | 2022-03-08T18:39:40.000Z | Vision/AnalyzeFormV2/AnalyzeForm/__init__.py | iii-PaulCridland/azure-search-power-skills | bbc5848c32b3bd6f2c8942693d854563e0cee708 | [
"MIT"
] | 47 | 2019-07-15T22:04:23.000Z | 2022-03-04T18:35:57.000Z | Vision/AnalyzeFormV2/AnalyzeForm/__init__.py | iii-PaulCridland/azure-search-power-skills | bbc5848c32b3bd6f2c8942693d854563e0cee708 | [
"MIT"
] | 99 | 2019-06-28T20:56:21.000Z | 2022-03-30T17:17:24.000Z | import logging
import json
import os
import logging
import pathlib
from azure.core.exceptions import ResourceNotFoundError
from azure.ai.formrecognizer import FormRecognizerClient
from azure.ai.formrecognizer import FormTrainingClient
from azure.core.credentials import AzureKeyCredential
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Invoked AnalyzeForm Skill.')
try:
body = json.dumps(req.get_json())
if body:
# For testing uncomment the following line to log the incoming request
#logging.info(body)
result = compose_response(body)
return func.HttpResponse(result, mimetype="application/json")
else:
return func.HttpResponse(
"The body of the request could not be parsed",
status_code=400
)
except ValueError:
return func.HttpResponse(
"The body of the request could not be parsed",
status_code=400
)
except KeyError:
return func.HttpResponse(
"Skill configuration error. Endpoint, key and model_id required.",
status_code=400
)
except AssertionError as error:
return func.HttpResponse(
"Request format is not a valid custom skill input",
status_code=400
)
def compose_response(json_data):
body = json.loads(json_data)
assert ('values' in body), "request does not implement the custom skill interface"
values = body['values']
# Prepare the Output before the loop
results = {}
results["values"] = []
mappings = None
with open(pathlib.Path(__file__).parent / 'field_mappings.json') as file:
mappings = json.loads(file.read())
endpoint = os.environ["FORMS_RECOGNIZER_ENDPOINT"]
key = os.environ["FORMS_RECOGNIZER_KEY"]
model_id = os.environ["FORMS_RECOGNIZER_MODEL_ID"]
form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(key))
for value in values:
output_record = transform_value(value, mappings, form_recognizer_client, model_id)
if output_record != None:
results["values"].append(output_record)
break
return json.dumps(results, ensure_ascii=False)
## Perform an operation on a record
def transform_value(value, mappings, form_recognizer_client,model_id):
try:
recordId = value['recordId']
except AssertionError as error:
return None
try:
assert ('data' in value), "'data' field is required."
data = value['data']
formUrl = data['formUrl']
formSasToken = data ['formSasToken']
formUrl = formUrl + formSasToken
poller = form_recognizer_client.begin_recognize_custom_forms_from_url(
model_id=model_id, form_url=formUrl)
result = poller.result()
recognized = {}
for recognized_form in result:
print("Form type: {}".format(recognized_form.form_type))
for name, field in recognized_form.fields.items():
label = field.label_data.text if field.label_data else name
for (k, v) in mappings.items():
if(label == k):
recognized[v] = field.value
except AssertionError as error:
return (
{
"recordId": recordId,
"errors": [ { "message": "Error:" + error.args[0] } ]
})
except Exception as error:
return (
{
"recordId": recordId,
"errors": [ { "message": "Error:" + str(error) } ]
})
return ({
"recordId": recordId,
"data": {
"recognized": recognized
}
})
| 36.67619 | 90 | 0.609192 |
8eb19cb5bb6e480425577d4672af1bfd07e3f193 | 265 | py | Python | tests/artificial/transf_BoxCox/trend_LinearTrend/cycle_7/ar_12/test_artificial_1024_BoxCox_LinearTrend_7_12_20.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_BoxCox/trend_LinearTrend/cycle_7/ar_12/test_artificial_1024_BoxCox_LinearTrend_7_12_20.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_BoxCox/trend_LinearTrend/cycle_7/ar_12/test_artificial_1024_BoxCox_LinearTrend_7_12_20.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 7, transform = "BoxCox", sigma = 0.0, exog_count = 20, ar_order = 12); | 37.857143 | 165 | 0.732075 |
972ed5804f9aeebbf33d60956e0968a1325d4660 | 1,106 | py | Python | aiotdlib/api/types/sessions.py | mostafa-arshadi/aiotdlib | 59f430a65dfb424fc69d471a0d7bcd77ad7acf08 | [
"MIT"
] | 37 | 2021-05-04T10:41:41.000Z | 2022-03-30T13:48:05.000Z | aiotdlib/api/types/sessions.py | mostafa-arshadi/aiotdlib | 59f430a65dfb424fc69d471a0d7bcd77ad7acf08 | [
"MIT"
] | 13 | 2021-07-17T19:54:51.000Z | 2022-02-26T06:50:00.000Z | aiotdlib/api/types/sessions.py | mostafa-arshadi/aiotdlib | 59f430a65dfb424fc69d471a0d7bcd77ad7acf08 | [
"MIT"
] | 7 | 2021-09-22T21:27:11.000Z | 2022-02-20T02:33:19.000Z | # =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from .session import Session
from ..base_object import BaseObject
class Sessions(BaseObject):
"""
Contains a list of sessions
:param sessions: List of sessions
:type sessions: :class:`list[Session]`
:param inactive_session_ttl_days: Number of days of inactivity before sessions will automatically be terminated; 1-366 days
:type inactive_session_ttl_days: :class:`int`
"""
ID: str = Field("sessions", alias="@type")
sessions: list[Session]
inactive_session_ttl_days: int
@staticmethod
def read(q: dict) -> Sessions:
return Sessions.construct(**q)
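# --- Editor's note (not part of the generated code) ---
# Illustrative input for read(), mirroring the fields declared above
# (the values shown are made up):
#
#     Sessions.read({
#         '@type': 'sessions',
#         'sessions': [],
#         'inactive_session_ttl_days': 180,
#     })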
| 33.515152 | 127 | 0.490054 |
24b4d02aa4b0856bf30f24067c64833ea23cd95a | 669 | py | Python | training_site/manage.py | janvorac/guess-the-number | 89ba9b70525b6b6d11541372ade6e0d1a48a5543 | [
"MIT"
] | null | null | null | training_site/manage.py | janvorac/guess-the-number | 89ba9b70525b6b6d11541372ade6e0d1a48a5543 | [
"MIT"
] | 7 | 2022-01-25T08:44:26.000Z | 2022-02-02T09:07:38.000Z | training_site/manage.py | janvorac/guess-the-number | 89ba9b70525b6b6d11541372ade6e0d1a48a5543 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'training_site.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.086957 | 77 | 0.681614 |
88f14a49d414c0b69f5c82bdfa2f989bb01c57bf | 7,927 | py | Python | plugin/references.py | Narretz/LSP | 28ecbb2221956781222fcf2aaa8ceb54c6a34f35 | [
"MIT"
] | null | null | null | plugin/references.py | Narretz/LSP | 28ecbb2221956781222fcf2aaa8ceb54c6a34f35 | [
"MIT"
] | null | null | null | plugin/references.py | Narretz/LSP | 28ecbb2221956781222fcf2aaa8ceb54c6a34f35 | [
"MIT"
] | null | null | null | import os
import sublime
import linecache
from .core.documents import is_at_word, get_position, get_document_position
from .core.panels import ensure_panel
from .core.protocol import Request, Point
from .core.registry import LspTextCommand, windows
from .core.settings import PLUGIN_NAME, settings
from .core.url import uri_to_filename
from .core.views import get_line
try:
from typing import List, Dict, Optional, Callable, Tuple
from mypy_extensions import TypedDict
assert List and Dict and Optional and Callable and Tuple and TypedDict
ReferenceDict = TypedDict('ReferenceDict', {'uri': str, 'range': dict})
except ImportError:
pass
def ensure_references_panel(window: sublime.Window) -> 'Optional[sublime.View]':
return ensure_panel(window, "references", r"^\s*\S\s+(\S.*):$", r"^\s+([0-9]+):?([0-9]+).*$",
"Packages/" + PLUGIN_NAME + "/Syntaxes/References.sublime-syntax")
class LspSymbolReferencesCommand(LspTextCommand):
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self.reflist = [] # type: List[List[str]]
self.word_region = None # type: Optional[sublime.Region]
self.word = ""
self.base_dir = None # type: Optional[str]
def is_enabled(self, event: 'Optional[dict]' = None) -> bool:
if self.has_client_with_capability('referencesProvider'):
return is_at_word(self.view, event)
return False
def run(self, edit: sublime.Edit, event: 'Optional[dict]' = None) -> None:
client = self.client_with_capability('referencesProvider')
file_path = self.view.file_name()
if client and file_path:
pos = get_position(self.view, event)
window = self.view.window()
self.word_region = self.view.word(pos)
self.word = self.view.substr(self.word_region)
# use relative paths if file on the same root.
base_dir = windows.lookup(window).get_project_path()
if base_dir:
if os.path.commonprefix([base_dir, file_path]):
self.base_dir = base_dir
document_position = get_document_position(self.view, pos)
if document_position:
document_position['context'] = {
"includeDeclaration": False
}
request = Request.references(document_position)
client.send_request(
request, lambda response: self.handle_response(response, pos))
def handle_response(self, response: 'Optional[List[ReferenceDict]]', pos: int) -> None:
window = self.view.window()
if response is None:
response = []
if window:
references_count = len(response)
# return if there are no references
if references_count < 1:
window.run_command("hide_panel", {"panel": "output.references"})
window.status_message("No references found")
return
references_by_file = self._group_references_by_file(response)
if settings.show_references_in_quick_panel:
self.show_quick_panel(references_by_file)
else:
self.show_references_panel(references_by_file)
def show_quick_panel(self, references_by_file: 'Dict[str, List[Tuple[Point, str]]]') -> None:
selected_index = -1
current_file_path = self.view.file_name()
for file_path, references in references_by_file.items():
for reference in references:
point, line = reference
item = ['{}:{}:{}'.format(self.get_relative_path(file_path), point.row + 1, point.col + 1), line]
self.reflist.append(item)
# pre-select a reference in the current file.
if current_file_path == file_path and selected_index == -1:
selected_index = len(self.reflist) - 1
flags = sublime.KEEP_OPEN_ON_FOCUS_LOST
if settings.quick_panel_monospace_font:
flags |= sublime.MONOSPACE_FONT
window = self.view.window()
if window:
window.show_quick_panel(
self.reflist,
self.on_ref_choice,
flags,
selected_index,
self.on_ref_highlight
)
def on_ref_choice(self, index: int) -> None:
self.open_ref_index(index)
def on_ref_highlight(self, index: int) -> None:
self.open_ref_index(index, transient=True)
def open_ref_index(self, index: int, transient: bool=False) -> None:
if index != -1:
flags = sublime.ENCODED_POSITION | sublime.TRANSIENT if transient else sublime.ENCODED_POSITION
window = self.view.window()
if window:
window.open_file(self.get_selected_file_path(index), flags)
def show_references_panel(self, references_by_file: 'Dict[str, List[Tuple[Point, str]]]') -> None:
window = self.view.window()
if window:
panel = ensure_references_panel(window)
if not panel:
return
text = ''
references_count = 0
for file, references in references_by_file.items():
text += '◌ {}:\n'.format(self.get_relative_path(file))
for reference in references:
references_count += 1
point, line = reference
text += '\t{:>8}:{:<4} {}\n'.format(point.row + 1, point.col + 1, line)
# append a new line after each file name
text += '\n'
base_dir = windows.lookup(window).get_project_path()
panel.settings().set("result_base_dir", base_dir)
panel.set_read_only(False)
panel.run_command("lsp_clear_panel")
window.run_command("show_panel", {"panel": "output.references"})
panel.run_command('append', {
'characters': "{} references for '{}'\n\n{}".format(references_count, self.word, text),
'force': True,
'scroll_to_end': False
})
# highlight all word occurrences
regions = panel.find_all(r"\b{}\b".format(self.word))
panel.add_regions('ReferenceHighlight', regions, 'comment', flags=sublime.DRAW_OUTLINED)
panel.set_read_only(True)
def get_selected_file_path(self, index: int) -> str:
return self.get_full_path(self.reflist[index][0])
def get_relative_path(self, file_path: str) -> str:
if self.base_dir:
return os.path.relpath(file_path, self.base_dir)
else:
return file_path
def get_full_path(self, file_path: str) -> str:
if self.base_dir:
return os.path.join(self.base_dir, file_path)
return file_path
def want_event(self) -> bool:
return True
def _group_references_by_file(self, references: 'List[ReferenceDict]'
) -> 'Dict[str, List[Tuple[Point, str]]]':
""" Return a dictionary that groups references by the file it belongs. """
grouped_references = {} # type: Dict[str, List[Tuple[Point, str]]]
for reference in references:
file_path = uri_to_filename(reference["uri"])
point = Point.from_lsp(reference['range']['start'])
# get line of the reference, to showcase its use
reference_line = get_line(self.view.window(), file_path, point.row)
if grouped_references.get(file_path) is None:
grouped_references[file_path] = []
grouped_references[file_path].append((point, reference_line))
# we don't want to cache the line, we always want to get fresh data
linecache.clearcache()
return grouped_references
| 40.651282 | 113 | 0.606787 |
8035a3de023aa6e3d5ea47b099145fa484f4830c | 16,547 | py | Python | src/encoded/batch_download.py | beta-cell-network/beta-cell-nw | 093b078fdb7932ebfcbc0715aeeb2261eda3ee52 | [
"MIT"
] | 4 | 2018-01-04T22:31:08.000Z | 2021-07-15T17:39:16.000Z | src/encoded/batch_download.py | beta-cell-network/beta-cell-nw | 093b078fdb7932ebfcbc0715aeeb2261eda3ee52 | [
"MIT"
] | 7 | 2017-10-31T23:47:47.000Z | 2022-01-10T00:12:42.000Z | src/encoded/batch_download.py | beta-cell-network/beta-cell-nw | 093b078fdb7932ebfcbc0715aeeb2261eda3ee52 | [
"MIT"
] | 10 | 2017-09-14T00:57:07.000Z | 2021-07-27T23:41:14.000Z | from collections import OrderedDict
from pyramid.compat import bytes_
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.view import view_config
from pyramid.response import Response
from snovault import TYPES
from snovault.util import simple_path_ids
from urllib.parse import (
parse_qs,
urlencode,
)
from .search import iter_search_results
from .search import list_visible_columns_for_schemas
import csv
import io
import json
import datetime
import logging
import re
log = logging.getLogger(__name__)
currenttime = datetime.datetime.now()
def includeme(config):
config.add_route('batch_download', '/batch_download/{search_params}')
config.add_route('metadata', '/metadata/{search_params}/{tsv}')
config.add_route('peak_download', '/peak_download/{search_params}/{tsv}')
config.add_route('report_download', '/report.tsv')
config.scan(__name__)
# includes concatenated properties
_tsv_mapping = OrderedDict([
('File accession', ['files.title']),
('File format', ['files.file_type']),
('Output type', ['files.output_type']),
('Experiment accession', ['accession']),
('Annotation accession', ['accession']),
('Assay', ['assay_term_name']),
('Annotation', ['annotation_type']),
('Biosample term id', ['biosample_term_id']),
('Biosample term name', ['biosample_term_name']),
('Biosample type', ['biosample_type']),
('Biosample life stage', ['replicates.library.biosample.life_stage']),
('Biosample sex', ['replicates.library.biosample.sex']),
('Biosample Age', ['replicates.library.biosample.age',
'replicates.library.biosample.age_units']),
('Biosample organism', ['replicates.library.biosample.organism.scientific_name']),
('Biosample treatments', ['replicates.library.biosample.treatments.treatment_term_name']),
('Biosample subcellular fraction term name', ['replicates.library.biosample.subcellular_fraction_term_name']),
('Biosample phase', ['replicates.library.biosample.phase']),
('Biosample synchronization stage', ['replicates.library.biosample.fly_synchronization_stage',
'replicates.library.biosample.worm_synchronization_stage',
'replicates.library.biosample.post_synchronization_time',
'replicates.library.biosample.post_synchronization_time_units']),
('Experiment target', ['target.name']),
('Antibody accession', ['replicates.antibody.accession']),
('Library made from', ['replicates.library.nucleic_acid_term_name']),
('Library depleted in', ['replicates.library.depleted_in_term_name']),
('Library extraction method', ['replicates.library.extraction_method']),
('Library lysis method', ['replicates.library.lysis_method']),
('Library crosslinking method', ['replicates.library.crosslinking_method']),
('Library strand specific', ['replicates.library.strand_specificity']),
('Experiment date released', ['date_released']),
('Project', ['award.project']),
('RBNS protein concentration', ['files.replicate.rbns_protein_concentration', 'files.replicate.rbns_protein_concentration_units']),
('Library fragmentation method', ['files.replicate.library.fragmentation_method']),
('Library size range', ['files.replicate.library.size_range']),
('Biological replicate(s)', ['files.biological_replicates']),
('Technical replicate', ['files.replicate.technical_replicate_number']),
('Read length', ['files.read_length']),
('Mapped read length', ['files.mapped_read_length']),
('Run type', ['files.run_type']),
('Paired end', ['files.paired_end']),
('Paired with', ['files.paired_with']),
('Derived from', ['files.derived_from']),
('Size', ['files.file_size']),
('Lab', ['files.lab.title']),
('md5sum', ['files.md5sum']),
('dbxrefs', ['files.dbxrefs']),
('file_format', ['files.file_format']),
('File download URL', ['files.href']),
('Assembly', ['files.assembly']),
('Platform', ['files.platform.title']),
('Controlled by', ['files.controlled_by']),
('File Status', ['files.status'])
])
_audit_mapping = OrderedDict([
('Audit WARNING', ['audit.WARNING.path',
'audit.WARNING.category',
'audit.WARNING.detail']),
('Audit INTERNAL_ACTION', ['audit.INTERNAL_ACTION.path',
'audit.INTERNAL_ACTION.category',
'audit.INTERNAL_ACTION.detail']),
('Audit NOT_COMPLIANT', ['audit.NOT_COMPLIANT.path',
'audit.NOT_COMPLIANT.category',
'audit.NOT_COMPLIANT.detail']),
('Audit ERROR', ['audit.ERROR.path',
'audit.ERROR.category',
'audit.ERROR.detail'])
])
def get_file_uuids(result_dict):
file_uuids = []
for item in result_dict['@graph']:
for file in item['files']:
file_uuids.append(file['uuid'])
return list(set(file_uuids))
def get_biosample_accessions(file_json, experiment_json):
for f in experiment_json['files']:
if file_json['uuid'] == f['uuid']:
accession = f.get('replicate', {}).get('library', {}).get('biosample', {}).get('accession')
if accession:
return accession
accessions = []
for replicate in experiment_json.get('replicates', []):
accession = replicate['library']['biosample']['accession']
accessions.append(accession)
return ', '.join(list(set(accessions)))
def get_peak_metadata_links(request):
if request.matchdict.get('search_params'):
search_params = request.matchdict['search_params']
else:
search_params = request.query_string
peak_metadata_tsv_link = '{host_url}/peak_metadata/{search_params}/peak_metadata.tsv'.format(
host_url=request.host_url,
search_params=search_params
)
peak_metadata_json_link = '{host_url}/peak_metadata/{search_params}/peak_metadata.json'.format(
host_url=request.host_url,
search_params=search_params
)
return [peak_metadata_tsv_link, peak_metadata_json_link]
def make_cell(header_column, row, exp_data_row):
temp = []
for column in _tsv_mapping[header_column]:
c_value = []
for value in simple_path_ids(row, column):
if str(value) not in c_value:
c_value.append(str(value))
if column == 'replicates.library.biosample.post_synchronization_time' and len(temp):
if len(c_value):
temp[0] = temp[0] + ' + ' + c_value[0]
elif len(temp):
if len(c_value):
temp = [x + ' ' + c_value[0] for x in temp]
else:
temp = c_value
exp_data_row.append(', '.join(list(set(temp))))
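# Illustrative sketch (added note, not part of the original module): for a mapping
# entry with a single path, make_cell simply collects the unique values found at
# that path. With a hypothetical row such as
#   row = {'target': {'name': 'CTCF-human'}}
# make_cell('Experiment target', row, exp_data_row) appends 'CTCF-human'
# to exp_data_row.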
def make_audit_cell(header_column, experiment_json, file_json):
categories = []
paths = []
for column in _audit_mapping[header_column]:
for value in simple_path_ids(experiment_json, column):
if 'path' in column:
paths.append(value)
elif 'category' in column:
categories.append(value)
data = []
for i, path in enumerate(paths):
if '/files/' in path and file_json.get('title', '') not in path:
            # Skip file audits that don't belong to this file
continue
else:
data.append(categories[i])
return ', '.join(list(set(data)))
@view_config(route_name='peak_download', request_method='GET')
def peak_download(context, request):
param_list = parse_qs(request.matchdict['search_params'])
param_list['field'] = []
header = ['annotation_type', 'coordinates', 'biosample.accession', 'file.accession', 'annotation.accession']
param_list['limit'] = ['all']
path = '/variant-search/?{}&{}'.format(urlencode(param_list, True),'referrer=peak_download')
results = request.embed(path, as_user=True)
uuids_in_results = get_file_uuids(results)
rows = []
json_doc = {}
for row in results['peaks']:
if row['_id'] in uuids_in_results:
file_json = request.embed(row['_id'])
annotation_json = request.embed(file_json['dataset'])
for hit in row['inner_hits']['positions']['hits']['hits']:
data_row = []
chrom = '{}'.format(row['_index'])
assembly = '{}'.format(row['_type'])
coordinates = '{}:{}-{}'.format(row['_index'], hit['_source']['start'], hit['_source']['end'])
file_accession = file_json['accession']
annotation_accession = annotation_json['accession']
annotation = annotation_json['annotation_type']
biosample_term = annotation_json['biosample_term_name']
data_row.extend([annotation, biosample_term, coordinates, file_accession, annotation_accession])
rows.append(data_row)
fout = io.StringIO()
writer = csv.writer(fout, delimiter='\t')
writer.writerow(header)
writer.writerows(rows)
return Response(
content_type='text/tsv',
body=fout.getvalue(),
content_disposition='attachment;filename="%s"' % 'peak_metadata.tsv'
)
@view_config(route_name='metadata', request_method='GET')
def metadata_tsv(context, request):
param_list = parse_qs(request.matchdict['search_params'])
if 'referrer' in param_list:
search_path = '/{}/'.format(param_list.pop('referrer')[0])
else:
search_path = '/search/'
param_list['field'] = []
header = []
file_attributes = []
for prop in _tsv_mapping:
header.append(prop)
param_list['field'] = param_list['field'] + _tsv_mapping[prop]
if _tsv_mapping[prop][0].startswith('files'):
file_attributes = file_attributes + [_tsv_mapping[prop][0]]
param_list['limit'] = ['all']
path = '{}?{}'.format(search_path, urlencode(param_list, True))
results = request.embed(path, as_user=True)
rows = []
for experiment_json in results['@graph']:
#log.warn(results['@graph'])
for f in experiment_json.get('files', []):
exp_data_row = []
for column in header:
if not _tsv_mapping[column][0].startswith('files'):
make_cell(column, experiment_json, exp_data_row)
f_attributes = ['files.title', 'files.file_type',
'files.output_type']
for f in experiment_json['files']:
if 'files.file_type' in param_list:
if f['file_type'] not in param_list['files.file_type']:
continue
f['href'] = request.host_url + f['href']
f_row = []
for attr in f_attributes:
f_row.append(f[attr[6:]])
data_row = f_row + exp_data_row
for prop in file_attributes:
if prop in f_attributes:
continue
path = prop[6:]
temp = []
for value in simple_path_ids(f, path):
temp.append(str(value))
if prop == 'files.replicate.rbns_protein_concentration':
if 'replicate' in f and 'rbns_protein_concentration_units' in f['replicate']:
temp[0] = temp[0] + ' ' + f['replicate']['rbns_protein_concentration_units']
if prop in ['files.paired_with', 'files.derived_from']:
                        # chopping off the path to just the accession
if len(temp):
new_values = [t[7:-1] for t in temp]
temp = new_values
data = list(set(temp))
data.sort()
data_row.append(', '.join(data))
audit_info = [make_audit_cell(audit_type, experiment_json, f) for audit_type in _audit_mapping]
data_row.extend(audit_info)
rows.append(data_row)
fout = io.StringIO()
writer = csv.writer(fout, delimiter='\t')
header.extend([prop for prop in _audit_mapping])
writer.writerow(header)
writer.writerows(rows)
return Response(
content_type='text/tsv',
body=fout.getvalue(),
content_disposition='attachment;filename="%s"' % 'metadata.tsv'
)
@view_config(route_name='batch_download', request_method='GET')
def batch_download(context, request):
# adding extra params to get required columns
param_list = parse_qs(request.matchdict['search_params'])
param_list['field'] = ['files.href', 'files.file_type', 'files']
param_list['limit'] = ['all']
path = '/search/?%s' % urlencode(param_list, True)
results = request.embed(path, as_user=True)
metadata_link = '{host_url}/metadata/{search_params}/metadata.tsv'.format(
host_url=request.host_url,
search_params=request.matchdict['search_params']
)
files = [metadata_link]
if 'files.file_type' in param_list:
for exp in results['@graph']:
for f in exp.get('files', []):
if f['file_type'] in param_list['files.file_type']:
files.append('{host_url}{href}'.format(
host_url=request.host_url,
href=f['href']
))
else:
for exp in results['@graph']:
for f in exp.get('files', []):
files.append('{host_url}{href}'.format(
host_url=request.host_url,
href=f['href']
))
return Response(
content_type='text/plain',
body='\n'.join(files),
content_disposition='attachment; filename="%s"' % 'files.txt'
)
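# Illustrative note (added, not part of the original view): the response body is a
# plain-text list with one URL per line, starting with the metadata.tsv link, e.g.
#   http://localhost/metadata/type%3DExperiment/metadata.tsv
#   http://localhost/files/ENCFF000ABC/@@download/ENCFF000ABC.fastq.gz
# (hypothetical host and accession shown for illustration only)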
def lookup_column_value(value, path):
nodes = [value]
names = path.split('.')
for name in names:
nextnodes = []
for node in nodes:
if name not in node:
continue
value = node[name]
if isinstance(value, list):
nextnodes.extend(value)
else:
nextnodes.append(value)
nodes = nextnodes
if not nodes:
return ''
# if we ended with an embedded object, show the @id
if nodes and hasattr(nodes[0], '__contains__') and '@id' in nodes[0]:
nodes = [node['@id'] for node in nodes]
seen = set()
deduped_nodes = []
for n in nodes:
if isinstance(n, dict):
n = str(n)
if n not in seen:
deduped_nodes.append(n)
return u','.join(u'{}'.format(n) for n in deduped_nodes)
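# Illustrative sketch (added note, not part of the original module): lookup_column_value
# walks a dotted path through nested dicts/lists and joins the de-duplicated leaf values.
# With a hypothetical item such as
#   item = {'files': [{'lab': {'title': 'ENCODE Lab'}}, {'lab': {'title': 'ENCODE Lab'}}]}
# the call lookup_column_value(item, 'files.lab.title') returns 'ENCODE Lab', because
# list values are flattened and duplicates are collapsed before joining.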
def format_row(columns):
"""Format a list of text columns as a tab-separated byte string."""
return b'\t'.join([bytes_(c, 'utf-8') for c in columns]) + b'\r\n'
@view_config(route_name='report_download', request_method='GET')
def report_download(context, request):
types = request.params.getall('type')
if len(types) != 1:
msg = 'Report view requires specifying a single type.'
raise HTTPBadRequest(explanation=msg)
# Make sure we get all results
request.GET['limit'] = 'all'
type = types[0]
schemas = [request.registry[TYPES][type].schema]
columns = list_visible_columns_for_schemas(request, schemas)
type = type.replace("'", '')
def format_header(seq):
newheader="%s\t%s%s?%s\r\n" % (currenttime, request.host_url, '/report/', request.query_string)
return(bytes(newheader, 'utf-8'))
# Work around Excel bug; can't open single column TSV with 'ID' header
if len(columns) == 1 and '@id' in columns:
columns['@id']['title'] = 'id'
header = [column.get('title') or field for field, column in columns.items()]
def generate_rows():
yield format_header(header)
yield format_row(header)
for item in iter_search_results(context, request):
values = [lookup_column_value(item, path) for path in columns]
yield format_row(values)
# Stream response using chunked encoding.
request.response.content_type = 'text/tsv'
request.response.content_disposition = 'attachment;filename="%s"' % '%(doctype)s Report %(yyyy)s/%(mm)s/%(dd)s.tsv' % {'yyyy': currenttime.year, 'mm': currenttime.month, 'dd': currenttime.day, 'doctype': type} #change file name
request.response.app_iter = generate_rows()
return request.response
| 42.104326 | 231 | 0.611652 |
418078d4e3be94880f3339f61052ffce4d274fcc | 119 | py | Python | batproject/accountapp/admin.py | JaL11/BAT | ed4bccef3c70ec01064ebd0c26933853d4f95355 | [
"MIT"
] | 1 | 2020-07-16T14:29:55.000Z | 2020-07-16T14:29:55.000Z | batproject/accountapp/admin.py | JaL11/BAT | ed4bccef3c70ec01064ebd0c26933853d4f95355 | [
"MIT"
] | 63 | 2020-06-04T14:41:18.000Z | 2020-07-29T18:06:14.000Z | batproject/accountapp/admin.py | JaL11/BAT | ed4bccef3c70ec01064ebd0c26933853d4f95355 | [
"MIT"
] | 6 | 2020-06-06T13:12:35.000Z | 2020-08-28T20:25:51.000Z | from django.contrib import admin
# Register your models here.
from .models import User
admin.site.register(User)
| 19.833333 | 33 | 0.764706 |
5b802f71e281f98036456fc7a5675e9deac9b681 | 6,753 | py | Python | example/ibc/main.py | kingli-crypto/chainlibpy | 8511c08c3bdb7de9cf58254a804ca329188a1dd8 | [
"Apache-2.0"
] | null | null | null | example/ibc/main.py | kingli-crypto/chainlibpy | 8511c08c3bdb7de9cf58254a804ca329188a1dd8 | [
"Apache-2.0"
] | null | null | null | example/ibc/main.py | kingli-crypto/chainlibpy | 8511c08c3bdb7de9cf58254a804ca329188a1dd8 | [
"Apache-2.0"
] | null | null | null | import json
import subprocess
import time
from pathlib import Path
import requests
import yaml
from pystarport.cluster import (ClusterCLI, find_account, init_cluster,
interact, start_cluster)
from pystarport.ports import api_port
from chainlibpy import Transaction, Wallet
from chainlibpy.amino import Coin, StdFee, TimeoutHeight
from chainlibpy.amino.message import IbcMsgTransfer
class Runner():
'''
    We use pystarport to create the IBC environment.
    You need to install hermes: https://github.com/informalsystems/ibc-rs/releases
'''
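    # Usage sketch (assumption, not part of the original example): spin up the two
    # local chains once, then exercise the transfer flow defined below, e.g.
    #
    #   runner = Runner()
    #   runner.start()   # init_cluster + start_cluster + hermes channel setup
    #   test_ibc()       # IBC transfer test defined at module level below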
def __init__(self, data_root=Path("/tmp/data"), config_file="config.yaml"):
self.data_root = data_root
self.config_file = config_file
@property
def cluster(self):
config = yaml.safe_load(open(self.config_file))
clis = {}
for key in config:
if key == "relayer":
continue
chain_id = key
clis[chain_id] = ClusterCLI(self.data_root, chain_id=chain_id)
return clis
def url_base(self, chain_id, index=0):
cli = self.cluster[chain_id]
port = cli.base_port(index)
return "http://127.0.0.1:{}".format(api_port(port))
def get_balance(self, chain_id, index, address):
url_base = self.url_base(chain_id, index)
url_balance = f"{url_base}/cosmos/bank/v1beta1/balances/{address}"
response = requests.get(url_balance)
balance = int(response.json()["balances"][0]["amount"])
return balance
def get_account_info(self, chain_id, index, address):
url_base = self.url_base(chain_id, index)
url_account = f"{url_base}/cosmos/auth/v1beta1/accounts/{address}"
response = requests.get(url_account)
account_info = response.json()["account"]
account_num = int(account_info["account_number"])
sequence = int(account_info["sequence"])
return account_num, sequence
def send_tx(self, chain_id, index, data):
url_base = self.url_base(chain_id, index)
url = f"{url_base}/txs"
response = requests.post(url, json=data)
return response
def init_relayer(self):
relayer = ["hermes", "-j", "-c", self.data_root / "relayer.toml"]
subprocess.run(
relayer
+ [
"create",
"channel",
"ibc-0",
"ibc-1",
"--port-a",
"transfer",
"--port-b",
"transfer",
],
check=True,
)
# start relaying
self.cluster["ibc-0"].supervisor.startProcess("relayer-demo")
@property
def relayer_channels(self):
# all clusters share the same root data directory
relayer = ["hermes", "-j", "-c", self.data_root / "relayer.toml"]
rsp = json.loads(subprocess.check_output(relayer + ["query", "channels", "ibc-0"]))
src_channel = rsp["result"][0]["channel_id"]
rsp = json.loads(subprocess.check_output(relayer + ["query", "channels", "ibc-1"]))
dst_channel = rsp["result"][0]["channel_id"]
return src_channel, dst_channel
def start(self):
'''
        After starting the tasks, you can use `supervisorctl -c task.ini` to check the status of each program
'''
data_path = "/tmp/dadta"
interact(f"rm -r {data_path}; mkdir -p {data_path}", ignore_error=True)
data_dir = Path("/tmp/data")
init_cluster(data_dir, "config.yaml", 26650)
start_cluster(data_dir)
time.sleep(10)
self.init_relayer()
def test_ibc():
r = Runner()
# r.start()
# time.sleep(10)
seed_0 = find_account(r.data_root, "ibc-0", "relayer")["mnemonic"]
seed_1 = find_account(r.data_root, "ibc-1", "relayer")["mnemonic"]
wallet_0 = Wallet(seed_0)
wallet_1 = Wallet(seed_1)
addr_0 = wallet_0.address
addr_1 = wallet_1.address
src_channel, dst_channel = r.relayer_channels
# do a transfer from ibc-0 to ibc-1
print("transfer ibc0 -> ibc1")
account_num, sequence = r.get_account_info("ibc-0", 0, addr_0)
fee = StdFee("300000", [Coin("100000")])
tx = Transaction(
wallet=wallet_0,
account_num=account_num,
sequence=sequence,
chain_id="ibc-0",
fee=fee,
)
amount = Coin("10000")
target_version = 1
timeout_height = TimeoutHeight(str(target_version), "10000000000")
msg = IbcMsgTransfer(
source_port="transfer",
source_channel=src_channel,
sender=addr_0,
receiver=addr_1,
coin=amount,
packet_timeout_height=timeout_height,
packet_timeout_timestamp="0",
absolute_timeouts=True,
)
tx.add_msg(msg)
signed_tx = tx.get_pushable()
response = r.send_tx("ibc-0", 0, signed_tx)
if not response.ok:
raise Exception(response.reason)
else:
result = response.json()
print("send tx result:", result)
if result.get("code"):
raise Exception(result["raw_log"])
# get the balance after sync
time.sleep(5)
# get the ibc-0 balance
balance_0 = r.get_balance("ibc-0", 0, addr_0)
print("balance 0 after transfer: ", balance_0)
balance_1 = r.get_balance("ibc-1", 0, addr_1)
print("balance 1 after transfer: ", balance_1)
# do a transfer from ibc-1 to ibc-0
print("transfer ibc1 -> ibc0")
account_num, sequence = r.get_account_info("ibc-1", 0, addr_1)
tx = Transaction(
wallet=wallet_1,
account_num=account_num,
sequence=sequence,
chain_id="ibc-1",
)
amount = Coin("10000", f"transfer/{dst_channel}/basecro")
target_version = 0
timeout_height = TimeoutHeight(str(target_version), "10000000000")
msg = IbcMsgTransfer(
source_port="transfer",
source_channel=dst_channel,
sender=addr_1,
receiver=addr_0,
coin=amount,
packet_timeout_height=timeout_height,
packet_timeout_timestamp="0",
absolute_timeouts=True,
)
tx.add_msg(msg)
signed_tx = tx.get_pushable()
response = r.send_tx("ibc-1", 0, signed_tx)
if not response.ok:
raise Exception(response.reason)
else:
result = response.json()
print("send tx result:", result)
if result.get("code"):
raise Exception(result["raw_log"])
# get the balance after sync
time.sleep(50)
# get the ibc-0 balance
balance_0 = r.get_balance("ibc-0", 0, addr_0)
print("balance 0 after transfer: ", balance_0)
balance_1 = r.get_balance("ibc-1", 0, addr_1)
print("balance 1 after transfer: ", balance_1)
if __name__ == "__main__":
test_ibc()
| 33.26601 | 104 | 0.614394 |
a84fa6edf2b9189f7afa5bdd83be2921d2a76042 | 3,085 | py | Python | yap/controllers/front.py | AFPy/Yap | 542fd1b679cedf1772c3ce0948d1fa40390a288e | [
"PSF-2.0"
] | null | null | null | yap/controllers/front.py | AFPy/Yap | 542fd1b679cedf1772c3ce0948d1fa40390a288e | [
"PSF-2.0"
] | null | null | null | yap/controllers/front.py | AFPy/Yap | 542fd1b679cedf1772c3ce0948d1fa40390a288e | [
"PSF-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
# (C) Copyright 2008 Tarek Ziadé <[email protected]>
#
import logging
import os
from os.path import join, dirname
import shutil
from lxml import etree
import time
import datetime
from pylons import config
from sgmllib import SGMLParser
from yap.lib.base import *
from atomisator.main.config import AtomisatorConfig
TITLESIZE = 70
MAXSIZE = 150
log = logging.getLogger(__name__)
root = os.path.split(os.path.dirname(__file__))[0]
PUBLIC_RSS = os.path.realpath(join(root, 'public', 'rss.xml'))
CONFIG = join(dirname(root), 'atomisator.cfg')
class Html2Txt(SGMLParser):
def reset(self):
SGMLParser.reset(self)
self.pieces = []
def handle_data(self, text):
self.pieces.append(text)
def handle_entityref(self, ref):
if ref == 'amp':
self.pieces.append("&")
def output(self):
return ' '.join(self.pieces).replace('<br/>', '')
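# Usage sketch (added illustration, not in the original controller): Html2Txt strips
# markup and keeps only text nodes, e.g.
#   parser = Html2Txt()
#   parser.reset()
#   parser.feed('<p>Hello &amp; welcome</p>')
#   parser.close()
#   parser.output()   # -> roughly 'Hello & welcome' (whitespace may vary)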
class FrontController(BaseController):
def index(self):
parser = AtomisatorConfig(CONFIG)
# getting parameters for the rss output
rss = dict(parser.outputs)['rss']
# getting the target xml file
rss_file = rss[0]
xml = os.path.realpath(rss_file)
if not os.path.exists(xml):
xml = os.path.realpath(join(root, rss_file))
if not os.path.exists(xml):
raise ValueError('File %s not found' % xml)
# if not under public, we need to copy it to public/rss.xml
if xml != PUBLIC_RSS:
shutil.copyfile(xml, PUBLIC_RSS)
doc = etree.XML(open(xml).read())
items = doc.xpath('/rss/channel/item')
def _date(value):
d = time.strptime(value.split('.')[0], '%Y-%m-%d %H:%M:%S')
d = datetime.datetime(*d[:6])
return d.strftime('%d/%m/%Y')
def _extract(entry):
if entry.tag == 'pubDate':
return entry.tag, _date(entry.text)
if entry.tag == 'title':
if len(entry.text) > TITLESIZE:
return 'title', entry.text[:TITLESIZE] + '...'
return 'title', entry.text
return entry.tag, entry.text
items = [dict([_extract(x)
for x in e.getchildren()])
for e in items]
# building an extract
def _extract(html, title):
if isinstance(html, unicode):
try:
html = html.decode('utf8')
except:
html = str(type(html))
parser = Html2Txt()
parser.reset()
parser.feed(html)
parser.close()
res = parser.output().strip()
size = MAXSIZE - len(title)
if size < 0:
return ''
return res[:size] + '...'
for i in items:
i['extract'] = _extract(i['description'], i['title'])
c.entries = items
c.title = doc.xpath('/rss/channel/title')[0].text
return render('/front.mako')
| 29.663462 | 71 | 0.545867 |
7bb34199a716aba79a1f5d9cc7cfd915e382ddfa | 3,244 | py | Python | app/main/views.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | [
"Unlicense"
] | null | null | null | app/main/views.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | [
"Unlicense"
] | null | null | null | app/main/views.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | [
"Unlicense"
] | null | null | null | from flask import render_template,request,redirect,url_for,abort,flash
from . import main
from flask_login import login_required,current_user
from ..models import User,Pitch,Comment
from .forms import UpdateProfile,PitchForm,CommentForm
from .. import db,photos
# import markdown2
# Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Pitch Application'
pitch = Pitch.query.all()
# categories = Category.get_categories()
return render_template('index.html',title = title, Pitch = pitch)
@main.route('/pitch/new', methods=['GET','POST'])
@login_required
def new_pitch():
form=PitchForm()
if form.validate_on_submit():
pitches=Pitch(category=form.category.data,pitch_content=form.content.data)
db.session.add(pitches)
db.session.commit()
flash('pitch created')
pitches=Pitch.query.all()
return render_template('pitch.html',form=form, pitch=pitches)
@main.route('/category/<int:id>')
def category(id):
    category = PitchCategory.query.get(id)
    if category is None:
        abort(404)
    pitch_in_category = Pitch.get_pitch(id)
    return render_template('category.html', category=category, pitch=pitch_in_category)
@main.route('/pitch/comments/new/<int:id>',methods = ['GET','POST'])
@login_required
def new_comment(id):
form = CommentForm()
if form.validate_on_submit():
        new_comment = Comment(pitch_id=id, data=form.comment.data)
new_comment.save_comment()
return redirect(url_for('main.new_pitch'))
return render_template('ncomment.html', form=form)
@main.route('/comments/<int:id>')
def single_comment(id):
comment=Comment.query.get(id)
if comment is None:
abort(404)
return render_template('new_comment.html')
@main.route('/view/comment/<int:id>')
def view_comments(id):
'''
Function that shows the comments of a particular pitch
'''
comments = Comment.get_comments(id)
return render_template('viewcomment.html',comments = comments, id=id)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html",user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname)) | 28.964286 | 89 | 0.687115 |
6e8229ddd4deceb52130dfea853f3d4016ccf290 | 4,282 | py | Python | genRandInputs.py | deehzee/affine-charform | e03d43972e28e2a364bd54b6bd7b95e77bf2e9d2 | [
"MIT"
] | 2 | 2019-04-13T03:50:22.000Z | 2021-03-02T12:34:47.000Z | genRandInputs.py | deehzee/affine-charform | e03d43972e28e2a364bd54b6bd7b95e77bf2e9d2 | [
"MIT"
] | null | null | null | genRandInputs.py | deehzee/affine-charform | e03d43972e28e2a364bd54b6bd7b95e77bf2e9d2 | [
"MIT"
] | null | null | null | # genRandInput.py - Generate random input
def random_inputs(N = 5, maxvarn = 4, maxs = 3):
    # N = sample size for each kind
# maxvarn = maximum variation for n
# maxs = maximum value for s_i
# X = type (A, B, C, D, E or F)
# n = subscript
# r = superscript
# S = specialization
# in $X_n^{(r)}$
# k = number of nodes (GCM A is (k x k) matrix)
import random
sfrom = range(maxs + 1)
#
# Aff-1: r=1
#
r = 1
# Type $A_n^{(1)}$
X = "A"
nfrom = range(1, maxvarn + 1)
for _ in range(N):
n = random.choice(nfrom)
k = n + 1
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
# Type $B_n^{(1)}$
X = "B"
nfrom = range(3, maxvarn + 3)
for _ in range(N):
n = random.choice(nfrom)
k = n + 1
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
# Type $C_n^{(1)}$
X = "C"
nfrom = range(2, maxvarn + 2)
for _ in range(N):
n = random.choice(nfrom)
k = n + 1
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
# Type $D_n^{(1)}$
X = "D"
nfrom = range(4, maxvarn + 4)
for _ in range(N):
n = random.choice(nfrom)
k = n + 1
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
# Type $E_n^{(1)}$
X = "E"
nfrom = [6, 7, 8]
for _ in range(N):
n = random.choice(nfrom)
k = n + 1
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
# Type $F_n^{(1)}$
X, n = "F", 4
k = n + 1
for _ in range(N):
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
# Type $G_n^{(1)}$
X, n = "G", 2
k = n + 1
for _ in range(N):
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
#
# Aff-2
#
r = 2
# Type $A_n^{(2)}:
X = "A"
## n is even
nfrom = range(2, 2 + 2*maxvarn, 2)
for _ in range(N):
n = random.choice(nfrom)
k = n/2 + 1
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
## n is odd
nfrom = range(5, 5 + 2*maxvarn, 2)
for _ in range(N):
n = random.choice(nfrom)
k = (n + 1)/2 + 1
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
# Type $D_n^{(2)}
X = "D"
nfrom = range(3, 3 + maxvarn)
for _ in range(N):
n = random.choice(nfrom)
k = n
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
# Type $E_n^{(2)}$
X, n = "E", 6
k = n - 1
for _ in range(N):
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
#
# Aff-3
#
r = 3
# Type $D_n^{(3)}
X, n = "D", 4
k = n - 1
for _ in range(N):
S = [random.choice(sfrom) for i in range(k)]
print(X, n, r, S)
# End of random_inputs(...)
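# Illustrative note (assumption, not part of the original script): under Python 2 the
# print(...) calls above emit tuples, so a generated input line looks roughly like
#   ('A', 3, 1, [0, 2, 1, 3])
# i.e. type X, subscript n, superscript r and the specialization list S.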
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='Generate random inputs.'
)
parser.add_argument(
'-N',
metavar='NRAND',
help='the number of inputs per test cases (default 5)',
action='store',
#dest='N',
type=int,
default=5,
)
parser.add_argument(
'-n', '--varn',
metavar='VARN',
help="the variability range for the parameter 'n' \
(default 4)",
action='store',
type=int,
default=4,
)
parser.add_argument(
'-s', '--maxs',
metavar='MAXS',
help="the max value for each 's_i's (default 3)",
action='store',
type=int,
default=3,
)
parser.add_argument(
'-m', '--message',
metavar='HDR_MSG',
help='the header message at the top',
action='store',
type=str,
default=None,
)
args = parser.parse_args();
# print args
if args.message:
print '# {}'.format(args.message)
random_inputs(args.N, args.varn, args.maxs)
| 24.05618 | 67 | 0.456796 |
d5aadadfed1c29d40d8d3576096584dcc5489b49 | 14,902 | py | Python | monai/handlers/checkpoint_saver.py | dylanbuchi/MONAI | 1651f1b003b0ffae8b615d191952ad65ad091277 | [
"Apache-2.0"
] | 2,971 | 2019-10-16T23:53:16.000Z | 2022-03-31T20:58:24.000Z | monai/handlers/checkpoint_saver.py | dylanbuchi/MONAI | 1651f1b003b0ffae8b615d191952ad65ad091277 | [
"Apache-2.0"
] | 2,851 | 2020-01-10T16:23:44.000Z | 2022-03-31T22:14:53.000Z | monai/handlers/checkpoint_saver.py | dylanbuchi/MONAI | 1651f1b003b0ffae8b615d191952ad65ad091277 | [
"Apache-2.0"
] | 614 | 2020-01-14T19:18:01.000Z | 2022-03-31T14:06:14.000Z | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import warnings
from typing import TYPE_CHECKING, Dict, Mapping, Optional
from monai.config import IgniteInfo
from monai.utils import min_version, optional_import
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
Checkpoint, _ = optional_import("ignite.handlers", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Checkpoint")
if TYPE_CHECKING:
from ignite.engine import Engine
from ignite.handlers import DiskSaver
else:
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
DiskSaver, _ = optional_import("ignite.handlers", IgniteInfo.OPT_IMPORT_VERSION, min_version, "DiskSaver")
class CheckpointSaver:
"""
CheckpointSaver acts as an Ignite handler to save checkpoint data into files.
    It supports saving according to metric results, epoch number, iteration number
and last model or exception.
Args:
save_dir: the target directory to save the checkpoints.
save_dict: source objects that save to the checkpoint. examples::
{'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
file_prefix: prefix for the filenames to which objects will be saved.
save_final: whether to save checkpoint or session at final iteration or exception.
If checkpoints are to be saved when an exception is raised, put this handler before
`StatsHandler` in the handler list, because the logic with Ignite can only trigger
the first attached handler for `EXCEPTION_RAISED` event.
final_filename: set a fixed filename to save the final model if `save_final=True`.
If None, default to `checkpoint_final_iteration=N.pt`.
save_key_metric: whether to save checkpoint or session when the value of key_metric is
            higher than all the previous values during training. Keep 4 decimal places of the metric;
            the checkpoint name is: {file_prefix}_key_metric=0.XXXX.pth.
key_metric_name: the name of key_metric in ignite metrics dictionary.
If None, use `engine.state.key_metric` instead.
key_metric_n_saved: save top N checkpoints or sessions, sorted by the value of key
metric in descending order.
key_metric_filename: set a fixed filename to set the best metric model, if not None,
`key_metric_n_saved` should be 1 and only keep the best metric model.
key_metric_save_state: whether to save the tracking list of key metric in the checkpoint file.
if `True`, then will save an object in the checkpoint file with key `checkpointer` to be
consistent with the `include_self` arg of `Checkpoint` in ignite:
https://pytorch.org/ignite/v0.4.5/generated/ignite.handlers.checkpoint.Checkpoint.html.
typically, it's used to resume training and compare current metric with previous N values.
key_metric_greater_or_equal: if `True`, the latest equally scored model is stored. Otherwise,
            save the first equally scored model. default to `False`.
        key_metric_negative_sign: whether to add a negative sign to the metric score when comparing metrics,
            because for error-like metrics, smaller is better (objects with larger score are retained).
default to `False`.
epoch_level: save checkpoint during training for every N epochs or every N iterations.
`True` is epoch level, `False` is iteration level.
save_interval: save checkpoint every N epochs, default is 0 to save no checkpoint.
n_saved: save latest N checkpoints of epoch level or iteration level, 'None' is to save all.
Note:
CheckpointHandler can be used during training, validation or evaluation.
example of saved files:
- checkpoint_iteration=400.pt
- checkpoint_iteration=800.pt
- checkpoint_epoch=1.pt
- checkpoint_final_iteration=1000.pt
- checkpoint_key_metric=0.9387.pt
"""
def __init__(
self,
save_dir: str,
save_dict: Dict,
name: Optional[str] = None,
file_prefix: str = "",
save_final: bool = False,
final_filename: Optional[str] = None,
save_key_metric: bool = False,
key_metric_name: Optional[str] = None,
key_metric_n_saved: int = 1,
key_metric_filename: Optional[str] = None,
key_metric_save_state: bool = False,
key_metric_greater_or_equal: bool = False,
key_metric_negative_sign: bool = False,
epoch_level: bool = True,
save_interval: int = 0,
n_saved: Optional[int] = None,
) -> None:
if save_dir is None:
raise AssertionError("must provide directory to save the checkpoints.")
self.save_dir = save_dir
if not (save_dict is not None and len(save_dict) > 0):
raise AssertionError("must provide source objects to save.")
self.save_dict = save_dict
self.logger = logging.getLogger(name)
self.epoch_level = epoch_level
self.save_interval = save_interval
self._final_checkpoint = self._key_metric_checkpoint = self._interval_checkpoint = None
self._name = name
class _DiskSaver(DiskSaver):
"""
Enhance the DiskSaver to support fixed filename.
"""
def __init__(self, dirname: str, filename: Optional[str] = None):
# set `atomic=False` as `atomic=True` only gives read/write permission to the user who saved the file,
# without group/others read permission
super().__init__(dirname=dirname, require_empty=False, atomic=False)
self.filename = filename
def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None:
if self.filename is not None:
filename = self.filename
super().__call__(checkpoint=checkpoint, filename=filename, metadata=metadata)
def remove(self, filename: str) -> None:
if self.filename is not None:
filename = self.filename
super().remove(filename=filename)
if save_final:
def _final_func(engine: Engine):
return engine.state.iteration
self._final_checkpoint = Checkpoint(
to_save=self.save_dict,
save_handler=_DiskSaver(dirname=self.save_dir, filename=final_filename),
filename_prefix=file_prefix,
score_function=_final_func,
score_name="final_iteration",
)
if save_key_metric:
def _score_func(engine: Engine):
if isinstance(key_metric_name, str):
metric_name = key_metric_name
elif hasattr(engine.state, "key_metric_name"):
metric_name = engine.state.key_metric_name # type: ignore
else:
raise ValueError(
f"Incompatible values: save_key_metric=True and key_metric_name={key_metric_name}."
)
return (-1 if key_metric_negative_sign else 1) * engine.state.metrics[metric_name]
if key_metric_filename is not None and key_metric_n_saved > 1:
raise ValueError("if using fixed filename to save the best metric model, we should only save 1 model.")
self._key_metric_checkpoint = Checkpoint(
to_save=self.save_dict,
save_handler=_DiskSaver(dirname=self.save_dir, filename=key_metric_filename),
filename_prefix=file_prefix,
score_function=_score_func,
score_name="key_metric",
n_saved=key_metric_n_saved,
include_self=key_metric_save_state,
greater_or_equal=key_metric_greater_or_equal,
)
if save_interval > 0:
def _interval_func(engine: Engine):
return engine.state.epoch if self.epoch_level else engine.state.iteration
self._interval_checkpoint = Checkpoint(
to_save=self.save_dict,
save_handler=_DiskSaver(dirname=self.save_dir),
filename_prefix=file_prefix,
score_function=_interval_func,
score_name="epoch" if self.epoch_level else "iteration",
n_saved=n_saved,
)
def load_state_dict(self, state_dict: Dict) -> None:
"""
Utility to resume the internal state of key metric tracking list if configured to save
checkpoints based on the key metric value.
Note to set `key_metric_save_state=True` when saving the previous checkpoint.
Example::
CheckpointSaver(
...
save_key_metric=True,
key_metric_save_state=True, # config to also save the state of this saver
).attach(engine)
engine.run(...)
# resumed training with a new CheckpointSaver
saver = CheckpointSaver(save_key_metric=True, ...)
# load the previous key metric tracking list into saver
CheckpointLoader("/test/model.pt"), {"checkpointer": saver}).attach(engine)
"""
if self._key_metric_checkpoint is not None:
self._key_metric_checkpoint.load_state_dict(state_dict)
else:
warnings.warn("no key metric checkpoint saver to resume the key metric tracking list.")
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if self._name is None:
self.logger = engine.logger
if self._final_checkpoint is not None:
engine.add_event_handler(Events.COMPLETED, self.completed)
engine.add_event_handler(Events.EXCEPTION_RAISED, self.exception_raised)
if self._key_metric_checkpoint is not None:
engine.add_event_handler(Events.EPOCH_COMPLETED, self.metrics_completed)
if self._interval_checkpoint is not None:
if self.epoch_level:
engine.add_event_handler(Events.EPOCH_COMPLETED(every=self.save_interval), self.interval_completed)
else:
engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.save_interval), self.interval_completed)
def _delete_previous_final_ckpt(self):
saved = self._final_checkpoint._saved
if len(saved) > 0:
item = saved.pop(0)
self._final_checkpoint.save_handler.remove(item.filename)
self.logger.info(f"Deleted previous saved final checkpoint: {item.filename}")
def completed(self, engine: Engine) -> None:
"""Callback for train or validation/evaluation completed Event.
Save final checkpoint if configure save_final is True.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if not callable(self._final_checkpoint):
raise AssertionError("Error: _final_checkpoint function not specified.")
# delete previous saved final checkpoint if existing
self._delete_previous_final_ckpt()
self._final_checkpoint(engine)
if self.logger is None:
raise AssertionError
if not hasattr(self.logger, "info"):
raise AssertionError("Error, provided logger has not info attribute.")
self.logger.info(f"Train completed, saved final checkpoint: {self._final_checkpoint.last_checkpoint}")
def exception_raised(self, engine: Engine, e: Exception) -> None:
"""Callback for train or validation/evaluation exception raised Event.
Save current data as final checkpoint if configure save_final is True. This callback may be skipped
because the logic with Ignite can only trigger the first attached handler for `EXCEPTION_RAISED` event.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
e: the exception caught in Ignite during engine.run().
"""
if not callable(self._final_checkpoint):
raise AssertionError("Error: _final_checkpoint function not specified.")
# delete previous saved final checkpoint if existing
self._delete_previous_final_ckpt()
self._final_checkpoint(engine)
if self.logger is None:
raise AssertionError
if not hasattr(self.logger, "info"):
raise AssertionError("Error, provided logger has not info attribute.")
self.logger.info(f"Exception raised, saved the last checkpoint: {self._final_checkpoint.last_checkpoint}")
raise e
def metrics_completed(self, engine: Engine) -> None:
"""Callback to compare metrics and save models in train or validation when epoch completed.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if not callable(self._key_metric_checkpoint):
raise AssertionError("Error: _key_metric_checkpoint function not specified.")
self._key_metric_checkpoint(engine)
def interval_completed(self, engine: Engine) -> None:
"""Callback for train epoch/iteration completed Event.
Save checkpoint if configure save_interval = N
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if not callable(self._interval_checkpoint):
raise AssertionError("Error: _interval_checkpoint function not specified.")
self._interval_checkpoint(engine)
if self.logger is None:
raise AssertionError
if not hasattr(self.logger, "info"):
raise AssertionError("Error, provided logger has not info attribute.")
if self.epoch_level:
self.logger.info(f"Saved checkpoint at epoch: {engine.state.epoch}")
else:
self.logger.info(f"Saved checkpoint at iteration: {engine.state.iteration}")
| 47.762821 | 119 | 0.661119 |
d514c63a18be319ee21a400e322a3d9cc8f5b7de | 30,014 | py | Python | plotly/widgets/graph_widget.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/widgets/graph_widget.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/widgets/graph_widget.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | """
Module to allow Plotly graphs to interact with IPython widgets.
"""
import uuid
from collections import deque
import pkgutil
from requests.compat import json as _json
# TODO: protected imports?
import ipywidgets as widgets
from traitlets import Unicode
from IPython.display import Javascript, display
import plotly.plotly.plotly as py
from plotly import utils, tools
from plotly.graph_objs import Figure
# Load JS widget code
# No officially recommended way to do this in any other way
# http://mail.scipy.org/pipermail/ipython-dev/2014-April/013835.html
js_widget_code = pkgutil.get_data('plotly',
'package_data/graphWidget.js'
).decode('utf-8')
display(Javascript(js_widget_code))
__all__ = None
class GraphWidget(widgets.DOMWidget):
"""An interactive Plotly graph widget for use in IPython
Notebooks.
"""
_view_name = Unicode('GraphView', sync=True)
_view_module = Unicode('graphWidget', sync=True)
_message = Unicode(sync=True)
_graph_url = Unicode(sync=True)
_new_url = Unicode(sync=True)
_filename = ''
_flags = {
'save_pending': False
}
# TODO: URL for offline enterprise
def __init__(self, graph_url='https://plot.ly/~playground/7', **kwargs):
"""Initialize a plotly graph widget
Args:
graph_url: The url of a Plotly graph
Example:
```
GraphWidget('https://plot.ly/~chris/3375')
```
"""
super(GraphWidget, self).__init__(**kwargs)
# TODO: Validate graph_url
self._graph_url = graph_url
self._listener_set = set()
self._event_handlers = {
'click': widgets.CallbackDispatcher(),
'hover': widgets.CallbackDispatcher(),
'zoom': widgets.CallbackDispatcher()
}
self._graphId = ''
self.on_msg(self._handle_msg)
# messages to the iframe client need to wait for the
# iframe to communicate that it is ready
# unfortunately, this two-way blocking communication
# isn't possible
# (https://github.com/ipython/ipython/wiki/IPEP-21:-Widget-Messages#caveats)
# so we'll just cue up messages until they're ready to be sent
self._clientMessages = deque()
@property
def url(self):
return self._new_url or ''
def _handle_msg(self, message):
"""Handle a msg from the front-end.
Args:
content (dict): Content of the msg.
"""
content = message['content']['data']['content']
if content.get('event', '') == 'pong':
self._graphId = content['graphId']
            # ready to receive - pop out all of the items in the deque
while self._clientMessages:
_message = self._clientMessages.popleft()
_message['graphId'] = self._graphId
_message = _json.dumps(_message)
self._message = _message
if content.get('event', '') in ['click', 'hover', 'zoom']:
# De-nest the message
if content['event'] == 'click' or content['event'] == 'hover':
message = content['message']['points']
elif content['event'] == 'zoom':
message = content['message']['ranges']
self._event_handlers[content['event']](self, message)
if content.get('event', '') == 'getAttributes':
self._attributes = content.get('response', {})
# there might be a save pending, use the plotly module to save
if self._flags['save_pending']:
self._flags['save_pending'] = False
url = py.plot(self._attributes, auto_open=False,
filename=self._filename, validate=False)
self._new_url = url
self._fade_to('slow', 1)
def _handle_registration(self, event_type, callback, remove):
self._event_handlers[event_type].register_callback(callback,
remove=remove)
event_callbacks = self._event_handlers[event_type].callbacks
if (len(event_callbacks) and event_type not in self._listener_set):
self._listener_set.add(event_type)
message = {'task': 'listen', 'events': list(self._listener_set)}
self._handle_outgoing_message(message)
def _handle_outgoing_message(self, message):
if self._graphId == '':
self._clientMessages.append(message)
else:
message['graphId'] = self._graphId
message['uid'] = str(uuid.uuid4())
self._message = _json.dumps(message, cls=utils.PlotlyJSONEncoder)
def on_click(self, callback, remove=False):
""" Assign a callback to click events propagated
by clicking on point(s) in the Plotly graph.
Args:
            callback (function): Callback function that is called
                on click events with the signature:
                callback(widget, click_obj) -> None
Args:
widget (GraphWidget): The current instance
of the graph widget that this callback is assigned to.
click_obj (dict): a nested dict that describes
which point(s) were clicked on.
click_obj example:
[
{
'curveNumber': 1,
'pointNumber': 2,
'x': 4,
'y': 14
}
]
remove (bool, optional): If False, attach the callback.
If True, remove the callback. Defaults to False.
Returns:
None
Example:
```
from IPython.display import display
def message_handler(widget, msg):
display(widget._graph_url)
display(msg)
g = GraphWidget('https://plot.ly/~chris/3375')
display(g)
g.on_click(message_handler)
```
"""
self._handle_registration('click', callback, remove)
def on_hover(self, callback, remove=False):
""" Assign a callback to hover events propagated
by hovering over points in the Plotly graph.
Args:
            callback (function): Callback function that is called
on hover events with the signature:
callback(widget, hover_obj) -> None
Args:
widget (GraphWidget): The current instance
of the graph widget that this callback is assigned to.
hover_obj (dict): a nested dict that describes
which point(s) was hovered over.
hover_obj example:
[
{
'curveNumber': 1,
'pointNumber': 2,
'x': 4,
'y': 14
}
]
remove (bool, optional): If False, attach the callback.
If True, remove the callback. Defaults to False.
Returns:
None
Example:
```
from IPython.display import display
def message_handler(widget, hover_msg):
display(widget._graph_url)
display(hover_msg)
g = GraphWidget('https://plot.ly/~chris/3375')
display(g)
g.on_hover(message_handler)
```
"""
self._handle_registration('hover', callback, remove)
def on_zoom(self, callback, remove=False):
""" Assign a callback to zoom events propagated
by zooming in regions in the Plotly graph.
Args:
            callback (function): Callback function that is called
on zoom events with the signature:
callback(widget, ranges) -> None
Args:
widget (GraphWidget): The current instance
of the graph widget that this callback is assigned to.
ranges (dict): A description of the
region that was zoomed into.
ranges example:
{
'x': [1.8399058038561549, 2.16443359662],
'y': [4.640902872777017, 7.855677154582]
}
remove (bool, optional): If False, attach the callback.
If True, remove the callback. Defaults to False.
Returns:
None
Example:
```
from IPython.display import display
def message_handler(widget, ranges):
display(widget._graph_url)
display(ranges)
g = GraphWidget('https://plot.ly/~chris/3375')
display(g)
g.on_zoom(message_handler)
```
"""
self._handle_registration('zoom', callback, remove)
def plot(self, figure_or_data, validate=True):
"""Plot figure_or_data in the Plotly graph widget.
Args:
figure_or_data (dict, list, or plotly.graph_obj object):
The standard Plotly graph object that describes Plotly
graphs as used in `plotly.plotly.plot`. See examples
of the figure_or_data in https://plot.ly/python/
Returns: None
Example 1 - Graph a scatter plot:
```
from plotly.graph_objs import Scatter
g = GraphWidget()
g.plot([Scatter(x=[1, 2, 3], y=[10, 15, 13])])
```
Example 2 - Graph a scatter plot with a title:
```
from plotly.graph_objs import Scatter, Figure, Data
fig = Figure(
data = Data([
Scatter(x=[1, 2, 3], y=[20, 15, 13])
]),
layout = Layout(title='Experimental Data')
)
g = GraphWidget()
g.plot(fig)
```
Example 3 - Clear a graph widget
```
from plotly.graph_objs import Scatter, Figure
g = GraphWidget()
g.plot([Scatter(x=[1, 2, 3], y=[10, 15, 13])])
# Now clear it
g.plot({}) # alternatively, g.plot(Figure())
```
"""
if figure_or_data == {} or figure_or_data == Figure():
validate = False
figure = tools.return_figure_from_figure_or_data(figure_or_data,
validate)
message = {
'task': 'newPlot',
'data': figure.get('data', []),
'layout': figure.get('layout', {}),
'graphId': self._graphId
}
self._handle_outgoing_message(message)
def restyle(self, update, indices=None):
"""Update the style of existing traces in the Plotly graph.
Args:
update (dict):
dict where keys are the graph attribute strings
and values are the value of the graph attribute.
To update graph objects that are nested, like
a marker's color, combine the keys with a period,
e.g. `marker.color`. To replace an entire nested object,
like `marker`, set the value to the object.
See Example 2 below.
To update an attribute of multiple traces, set the
                value to a list of values. If the list is shorter
than the number of traces, the values will wrap around.
Note: this means that for values that are naturally an array,
like `x` or `colorscale`, you need to wrap the value
in an extra array,
i.e. {'colorscale': [[[0, 'red'], [1, 'green']]]}
You can also supply values to different traces with the
indices argument.
See all of the graph attributes in our reference documentation
here: https://plot.ly/python/reference or by calling `help` on
graph objects in `plotly.graph_objs`.
indices (list, optional):
Specify which traces to apply the update dict to.
Negative indices are supported.
If indices are not given, the update will apply to
*all* traces.
Examples:
Initialization - Start each example below with this setup:
```
from plotly.widgets import GraphWidget
from IPython.display import display
graph = GraphWidget()
display(graph)
```
Example 1 - Set `marker.color` to red in every trace in the graph
```
graph.restyle({'marker.color': 'red'})
```
Example 2 - Replace `marker` with {'color': 'red'}
```
            graph.restyle({'marker': {'color': 'red'}})
```
Example 3 - Set `marker.color` to red
in the first trace of the graph
```
graph.restyle({'marker.color': 'red'}, indices=[0])
```
Example 4 - Set `marker.color` of all of the traces to
alternating sequences of red and green
```
graph.restyle({'marker.color': ['red', 'green']})
```
Example 5 - Set just `marker.color` of the first two traces
to red and green
```
graph.restyle({'marker.color': ['red', 'green']}, indices=[0, 1])
```
Example 6 - Set multiple attributes of all of the traces
```
graph.restyle({
'marker.color': 'red',
'line.color': 'green'
})
```
Example 7 - Update the data of the first trace
```
graph.restyle({
'x': [[1, 2, 3]],
'y': [[10, 20, 30]],
}, indices=[0])
```
Example 8 - Update the data of the first two traces
```
graph.restyle({
'x': [[1, 2, 3],
[1, 2, 4]],
'y': [[10, 20, 30],
[5, 8, 14]],
}, indices=[0, 1])
```
"""
# TODO: Add flat traces to graph_objs
message = {
'task': 'restyle',
'update': update,
'graphId': self._graphId
}
if indices:
message['indices'] = indices
self._handle_outgoing_message(message)
def relayout(self, layout):
"""Update the layout of the Plotly graph.
Args:
layout (dict):
dict where keys are the graph attribute strings
and values are the value of the graph attribute.
To update graph objects that are nested, like
the title of an axis, combine the keys with a period
e.g. `xaxis.title`. To set a value of an element in an array,
like an axis's range, use brackets, e.g. 'xaxis.range[0]'.
To replace an entire nested object, just specify the value to
the sub-object. See example 4 below.
See all of the layout attributes in our reference documentation
https://plot.ly/python/reference/#Layout
Or by calling `help` on `plotly.graph_objs.Layout`
Examples - Start each example below with this setup:
Initialization:
```
from plotly.widgets import GraphWidget
from IPython.display import display
graph = GraphWidget('https://plot.ly/~chris/3979')
display(graph)
```
Example 1 - Update the title
```
graph.relayout({'title': 'Experimental results'})
```
Example 2 - Update the xaxis range
```
graph.relayout({'xaxis.range': [-1, 6]})
```
Example 3 - Update the first element of the xaxis range
```
graph.relayout({'xaxis.range[0]': -3})
```
Example 4 - Replace the entire xaxis object
```
graph.relayout({'xaxis': {'title': 'Experimental results'}})
```
"""
# TODO: Add flat layout to graph_objs
message = {
'task': 'relayout', 'update': layout, 'graphId': self._graphId
}
self._handle_outgoing_message(message)
def hover(self, *hover_objs):
"""Show hover labels over the points specified in hover_obj.
Hover labels are the labels that normally appear when the
mouse hovers over points in the plotly graph.
Args:
hover_objs (tuple of dicts):
Specifies which points to place hover labels over.
The location of the hover labels is described by a dict with
                keys 'xval' and/or 'yval' or 'curveNumber' and 'pointNumber'
and optional keys 'hovermode' and 'subplot'
'xval' and 'yval' specify the (x, y) coordinates to
place the label.
                'xval' and 'yval' need to be close to a point drawn in a graph.
'curveNumber' and 'pointNumber' specify the trace number and
                the index of the point in that trace respectively.
'subplot' describes which axes to the coordinates refer to.
By default, it is equal to 'xy'. For example, to specify the
second x-axis and the third y-axis, set 'subplot' to 'x2y3'
'hovermode' is either 'closest', 'x', or 'y'.
When set to 'x', all data sharing the same 'x' coordinate will
be shown on screen with corresponding trace labels.
When set to 'y' all data sharing the same 'y' coordinates will
be shown on the screen with corresponding trace labels.
When set to 'closest', information about the data point closest
to where the viewer is hovering will appear.
Note: If 'hovermode' is 'x', only 'xval' needs to be set.
If 'hovermode' is 'y', only 'yval' needs to be set.
If 'hovermode' is 'closest', 'xval' and 'yval' both
need to be set.
Note: 'hovermode' can be toggled by the user in the graph
toolbar.
Note: It is not currently possible to apply multiple hover
labels to points on different axes.
Note: `hover` can only be called with multiple dicts if
'curveNumber' and 'pointNumber' are the keys of the dicts
Examples:
Initialization - Start each example below with this setup:
```
from plotly.widgets import GraphWidget
from IPython.display import display
graph = GraphWidget('https://plot.ly/~chris/3979')
display(graph)
```
Example 1 - Apply a label to the (x, y) point (3, 2)
```
graph.hover({'xval': 3, 'yval': 2, 'hovermode': 'closest'})
```
            Example 2 - Apply labels to all the points with the x coordinate 3
```
graph.hover({'xval': 3, 'hovermode': 'x'})
```
Example 3 - Apply a label to the first point of the first trace
and the second point of the second trace.
```
graph.hover({'curveNumber': 0, 'pointNumber': 0},
{'curveNumber': 1, 'pointNumber': 1})
```
"""
# TODO: Add to graph objects
if len(hover_objs) == 1:
hover_objs = hover_objs[0]
message = {
'task': 'hover', 'selection': hover_objs, 'graphId': self._graphId
}
self._handle_outgoing_message(message)
def add_traces(self, traces, new_indices=None):
""" Add new data traces to a graph.
If `new_indices` isn't specified, they are simply appended.
Args:
            traces (dict or list of dicts, or a plotly.graph_objs trace):
new_indices (list[int]|None), optional: The final indices the
added traces should occupy in the graph.
Examples:
Initialization - Start each example below with this setup:
```
from plotly.widgets import GraphWidget
from plotly.graph_objs import Scatter
from IPython.display import display
graph = GraphWidget('https://plot.ly/~chris/3979')
display(graph)
```
Example 1 - Add a scatter/line trace to the graph
```
graph.add_traces(Scatter(x = [1, 2, 3], y = [5, 4, 5]))
```
            Example 2 - Add a scatter trace and set it to be the
second trace. This will appear as the second
item in the legend.
```
graph.add_traces(Scatter(x = [1, 2, 3], y = [5, 6, 5]),
new_indices=[1])
```
Example 3 - Add multiple traces to the graph
```
graph.add_traces([
Scatter(x = [1, 2, 3], y = [5, 6, 5]),
Scatter(x = [1, 2.5, 3], y = [5, 8, 5])
])
```
"""
# TODO: Validate traces with graph_objs
message = {
'task': 'addTraces', 'traces': traces, 'graphId': self._graphId
}
if new_indices is not None:
message['newIndices'] = new_indices
self._handle_outgoing_message(message)
def delete_traces(self, indices):
"""Delete data traces from a graph.
Args:
indices (list[int]): The indices of the traces to be removed
Example - Delete the 2nd trace:
```
from plotly.widgets import GraphWidget
from IPython.display import display
graph = GraphWidget('https://plot.ly/~chris/3979')
display(graph)
graph.delete_traces([1])
```
"""
message = {
'task': 'deleteTraces',
'indices': indices,
'graphId': self._graphId
}
self._handle_outgoing_message(message)
def reorder_traces(self, current_indices, new_indices=None):
"""Reorder the traces in a graph.
The order of the traces determines the order of the legend entries
and the layering of the objects drawn in the graph, i.e. the first
trace is drawn first and the second trace is drawn on top of the
first trace.
Args:
current_indices (list[int]): The index of the traces to reorder.
new_indices (list[int], optional): The index of the traces
specified by `current_indices` after ordering.
If None, then move the traces to the end.
Examples:
Example 1 - Move the first trace to the second to last
position, the second trace to the last position
```
graph.move_traces([0, 1])
```
Example 2 - Move the first trace to the second position,
the second trace to the first position.
```
graph.move_traces([0], [1])
```
"""
message = {
'task': 'moveTraces',
'currentIndices': current_indices,
'graphId': self._graphId
}
if new_indices is not None:
message['newIndices'] = new_indices
self._handle_outgoing_message(message)
def save(self, ignore_defaults=False, filename=''):
"""
Save a copy of the current state of the widget in plotly.
:param (bool) ignore_defaults: Auto-fill in unspecified figure keys?
:param (str) filename: Name of the file on plotly.
"""
self._flags['save_pending'] = True
self._filename = filename
message = {'task': 'getAttributes', 'ignoreDefaults': ignore_defaults}
self._handle_outgoing_message(message)
self._fade_to('slow', 0.1)
def extend_traces(self, update, indices=(0,), max_points=None):
""" Append data points to existing traces in the Plotly graph.
Args:
update (dict):
dict where keys are the graph attribute strings
and values are arrays of arrays with values to extend.
Each array in the array will extend a trace.
Valid keys include:
'x', 'y', 'text,
'marker.color', 'marker.size', 'marker.symbol',
'marker.line.color', 'marker.line.width'
indices (list, int):
Specify which traces to apply the `update` dict to.
If indices are not given, the update will apply to
the traces in order.
max_points (int or dict, optional):
If specified, then only show the `max_points` most
recent points in the graph.
This is useful to prevent traces from becoming too
large (and slow) or for creating "windowed" graphs
in monitoring applications.
To set max_points to different values for each trace
or attribute, set max_points to a dict mapping keys
to max_points values. See the examples below.
Examples:
Initialization - Start each example below with this setup:
```
from plotly.widgets import GraphWidget
from IPython.display import display
graph = GraphWidget()
graph.plot([
{'x': [], 'y': []},
{'x': [], 'y': []}
])
display(graph)
```
Example 1 - Extend the first trace with x and y data
```
graph.extend_traces({'x': [[1, 2, 3]], 'y': [[10, 20, 30]]},
indices=[0])
```
Example 2 - Extend the second trace with x and y data
```
graph.extend_traces({'x': [[1, 2, 3]], 'y': [[10, 20, 30]]},
indices=[1])
```
Example 3 - Extend the first two traces with x and y data
```
graph.extend_traces({
'x': [[1, 2, 3], [2, 3, 4]],
'y': [[10, 20, 30], [3, 4, 3]]
}, indices=[0, 1])
```
Example 4 - Extend the first trace with x and y data and
limit the length of data in that trace to 50
points.
```
graph.extend_traces({
'x': [range(100)],
'y': [range(100)]
}, indices=[0, 1], max_points=50)
```
Example 5 - Extend the first and second trace with x and y data
and limit the length of data in the first trace to
25 points and the second trace to 50 points.
```
new_points = range(100)
graph.extend_traces({
'x': [new_points, new_points],
'y': [new_points, new_points]
},
indices=[0, 1],
max_points={
'x': [25, 50],
'y': [25, 50]
}
)
```
Example 6 - Update other attributes, like marker colors and
sizes and text
```
# Initialize a plot with some empty attributes
graph.plot([{
'x': [],
'y': [],
'text': [],
'marker': {
'size': [],
'color': []
}
}])
# Append some data into those attributes
graph.extend_traces({
'x': [[1, 2, 3]],
'y': [[10, 20, 30]],
'text': [['A', 'B', 'C']],
'marker.size': [[10, 15, 20]],
'marker.color': [['blue', 'red', 'orange']]
}, indices=[0])
```
Example 7 - Live-update a graph over a few seconds
```
import time
graph.plot([{'x': [], 'y': []}])
for i in range(10):
graph.extend_traces({
'x': [[i]],
'y': [[i]]
}, indices=[0])
time.sleep(0.5)
```
"""
message = {
'task': 'extendTraces',
'update': update,
'graphId': self._graphId,
'indices': indices
}
if max_points is not None:
message['maxPoints'] = max_points
self._handle_outgoing_message(message)
def _fade_to(self, duration, opacity):
"""
Change the opacity to give a visual signal to users.
"""
message = {'fadeTo': True, 'duration': duration, 'opacity': opacity}
self._handle_outgoing_message(message)
| 34.940629 | 84 | 0.506697 |
603daef8bd50008fb784efa6d1e8320da21c3393 | 3,389 | py | Python | apps/greencheck/api/asn_viewset.py | denning/admin-portal | 34a8e9f07f88c85c01ae1255517d889fb7951ba5 | ["Apache-2.0"] | 10 | 2020-11-23T22:47:26.000Z | 2022-01-28T16:26:50.000Z | apps/greencheck/api/asn_viewset.py | denning/admin-portal | 34a8e9f07f88c85c01ae1255517d889fb7951ba5 | ["Apache-2.0"] | 83 | 2020-05-17T20:25:50.000Z | 2022-03-29T18:11:50.000Z | apps/greencheck/api/asn_viewset.py | denning/admin-portal | 34a8e9f07f88c85c01ae1255517d889fb7951ba5 | ["Apache-2.0"] | 3 | 2020-11-30T00:13:45.000Z | 2021-06-11T13:42:04.000Z |
import logging
from rest_framework import mixins, viewsets
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework_csv import renderers as drf_csv_rndr # noqa
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema # noqa
from ..models import GreencheckASN
from ..serializers import GreenASNSerializer
from .permissions import BelongsToHostingProvider
logger = logging.getLogger(__name__)
ASN_API_LIST_DESCRIPTION = """
List the AS Networks associated with this provider.
Returns a list of AS Networks registered with the provider.
""" # noqa
ASN_API_CREATE_DESCRIPTION = """
Register a new AS Network for the hosting provider associated with this user.
Once an ASN is registered, it can take a short while before checks against the new IP
range show as green.
""" # noqa
ASN_API_DESTROY_DESCRIPTION = """
Removes the association of the AS Network with the corresponding id from this
hosting provider.
As with POSTing a new AS Network, there can be a delay until the change propagates.
"""
ASN_API_RETRIEVE_DESCRIPTION = """
Fetch the AS Network for the corresponding id provided.
"""
@method_decorator(
name="list",
decorator=swagger_auto_schema(
operation_description=ASN_API_LIST_DESCRIPTION, tags=["AS Network"]
),
)
@method_decorator(
name="create",
decorator=swagger_auto_schema(
operation_description=ASN_API_CREATE_DESCRIPTION, tags=["AS Network"]
),
)
@method_decorator(
name="retrieve",
decorator=swagger_auto_schema(
operation_description=ASN_API_RETRIEVE_DESCRIPTION, tags=["AS Network"]
),
)
@method_decorator(
name="destroy",
decorator=swagger_auto_schema(
operation_description=ASN_API_DESTROY_DESCRIPTION, tags=["AS Network"]
),
)
class ASNViewSet(
mixins.CreateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet,
):
"""
This viewset automatically provides `list` and `retrieve` actions.
We don't want ASNs to be editable once created, as they're often linked
to a request to approve it.
So, we expose 'create', 'destroy' and 'list' methods.
Similarly, 'delete' does not delete a range, but instead it marks the IP range
as inactive.
"""
serializer_class = GreenASNSerializer
queryset = GreencheckASN.objects.all()
authentication_classes = [SessionAuthentication, BasicAuthentication]
permission_classes = [BelongsToHostingProvider]
def filter_queryset(self, queryset):
"""
Because our viewset takes care of pagination and the rest
all we change is what is returned when we filter the queryset
for a given user.
http://www.cdrf.co/3.9/rest_framework.viewsets/ModelViewSet.html#list
"""
user = self.request.user
if user is not None:
provider = self.request.user.hostingprovider
if provider is not None:
return provider.greencheckasn_set.filter(active=True)
return []
def perform_destroy(self, instance):
"""
Overriding this one function means that the rest of
our destroy method works as expected.
"""
instance.active = False
instance.save()
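# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of wiring this viewset into a DRF router. The URL prefix
# "asns", the basename and the helper name are assumptions, not values taken
# from this project.
def build_example_router():
    from rest_framework import routers

    router = routers.DefaultRouter()
    router.register(r"asns", ASNViewSet, basename="asn")
    return router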
| 30.531532 | 89 | 0.718796 |
5b785bbb99ef2241514135101ebd7a3f34ce3490 | 415 | py | Python | apps/contrib/utils/email.py | jimialex/django-wise-template-mysql | 78b7281ba5cdd1e89a165b217e1b200fdba0135b | ["MIT"] | 5 | 2020-04-11T20:11:48.000Z | 2021-03-16T23:58:01.000Z | apps/contrib/utils/email.py | jimialex/django-wise-template-mysql | 78b7281ba5cdd1e89a165b217e1b200fdba0135b | ["MIT"] | 5 | 2020-04-11T20:17:56.000Z | 2021-06-16T19:18:29.000Z | apps/contrib/utils/email.py | jimialex/django-wise-template-mysql | 78b7281ba5cdd1e89a165b217e1b200fdba0135b | ["MIT"] | 1 | 2020-10-10T14:07:37.000Z | 2020-10-10T14:07:37.000Z |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
def send_email(subject, to, text_body, html_body):
"""Helps to send and email."""
email = EmailMultiAlternatives(
subject=subject,
from_email=settings.DEFAULT_FROM_EMAIL,
to=to, body=text_body,
)
email.attach_alternative(html_body, 'text/html')
email.send()
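# Illustrative usage sketch (not part of the original module); the function
# name, recipient address and message bodies below are placeholders.
def send_example_email():
    send_email(
        subject='Welcome',
        to=['user@example.com'],
        text_body='Welcome aboard!',
        html_body='<p>Welcome aboard!</p>',
    )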
| 24.411765 | 52 | 0.689157 |
2a19f5b9566d1435bd3209f6b8d2621391a42acd | 15,109 | py | Python | libcloud/compute/drivers/softlayer.py | ggreer/libcloud | a391ccdc0d068d37cb906a703f1494af50d83c8f | ["Apache-2.0"] | 1 | 2021-06-14T11:11:39.000Z | 2021-06-14T11:11:39.000Z | libcloud/compute/drivers/softlayer.py | ggreer/libcloud | a391ccdc0d068d37cb906a703f1494af50d83c8f | ["Apache-2.0"] | null | null | null | libcloud/compute/drivers/softlayer.py | ggreer/libcloud | a391ccdc0d068d37cb906a703f1494af50d83c8f | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Softlayer driver
"""
import sys
import time
import libcloud
from libcloud.utils.py3 import xmlrpclib
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, NodeImage
DATACENTERS = {
'sea01': {'country': 'US'},
'wdc01': {'country': 'US'},
'dal01': {'country': 'US'}
}
NODE_STATE_MAP = {
'RUNNING': NodeState.RUNNING,
'HALTED': NodeState.TERMINATED,
'PAUSED': NodeState.TERMINATED,
}
DEFAULT_PACKAGE = 46
SL_IMAGES = [
{'id': 1684, 'name': 'CentOS 5 - Minimal Install (32 bit)'},
{'id': 1685, 'name': 'CentOS 5 - Minimal Install (64 bit)'},
{'id': 1686, 'name': 'CentOS 5 - LAMP Install (32 bit)'},
{'id': 1687, 'name': 'CentOS 5 - LAMP Install (64 bit)'},
{'id': 1688, 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (32 bit)'},
{'id': 1689, 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (64 bit)'},
{'id': 1690, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (32 bit)'},
{'id': 1691, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (64 bit)'},
{'id': 1692, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (32 bit)'},
{'id': 1693, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (64 bit)'},
{'id': 1694, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - LAMP Install (32 bit)'},
{'id': 1695, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - LAMP Install (64 bit)'},
{'id': 1696, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (32 bit)'},
{'id': 1697, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (64 bit)'},
{'id': 1698, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (32 bit)'},
{'id': 1699, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (64 bit)'},
{'id': 1700, 'name': 'Windows Server 2003 Standard SP2 with R2 (32 bit)'},
{'id': 1701, 'name': 'Windows Server 2003 Standard SP2 with R2 (64 bit)'},
{'id': 1703, 'name': 'Windows Server 2003 Enterprise SP2 with R2 (64 bit)'},
{'id': 1705, 'name': 'Windows Server 2008 Standard Edition (64bit)'},
{'id': 1715, 'name': 'Windows Server 2003 Datacenter SP2 (64 bit)'},
{'id': 1716, 'name': 'Windows Server 2003 Datacenter SP2 (32 bit)'},
{'id': 1742, 'name': 'Windows Server 2008 Standard Edition SP2 (32bit)'},
{'id': 1752, 'name': 'Windows Server 2008 Standard Edition SP2 (64bit)'},
{'id': 1756, 'name': 'Windows Server 2008 Enterprise Edition SP2 (32bit)'},
{'id': 1761, 'name': 'Windows Server 2008 Enterprise Edition SP2 (64bit)'},
{'id': 1766, 'name': 'Windows Server 2008 Datacenter Edition SP2 (32bit)'},
{'id': 1770, 'name': 'Windows Server 2008 Datacenter Edition SP2 (64bit)'},
{'id': 1857, 'name': 'Windows Server 2008 R2 Standard Edition (64bit)'},
{'id': 1860, 'name': 'Windows Server 2008 R2 Enterprise Edition (64bit)'},
{'id': 1863, 'name': 'Windows Server 2008 R2 Datacenter Edition (64bit)'},
]
"""
The following code snippet will print out all available "prices"
mask = { 'items': '' }
res = self.connection.request(
"SoftLayer_Product_Package",
"getObject",
res,
id=46,
object_mask=mask
)
from pprint import pprint; pprint(res)
"""
SL_TEMPLATES = {
'sl1': {
'imagedata': {
'name': '2 x 2.0 GHz, 1GB ram, 100GB',
'ram': 1024,
'disk': 100,
'bandwidth': None
},
'prices':[
{'id': 1644}, # 1 GB
{'id': 1639}, # 100 GB (SAN)
{'id': 1963}, # Private 2 x 2.0 GHz Cores
{'id': 21}, # 1 IP Address
{'id': 55}, # Host Ping
{'id': 58}, # Automated Notification
{'id': 1800}, # 0 GB Bandwidth
{'id': 57}, # Email and Ticket
{'id': 274}, # 1000 Mbps Public & Private Networks
{'id': 905}, # Reboot / Remote Console
{'id': 418}, # Nessus Vulnerability Assessment & Reporting
{'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account
],
},
'sl2': {
'imagedata': {
'name': '2 x 2.0 GHz, 4GB ram, 350GB',
'ram': 4096,
'disk': 350,
'bandwidth': None
},
'prices': [
{'id': 1646}, # 4 GB
{'id': 1639}, # 100 GB (SAN) - This is the only available "First Disk"
{'id': 1638}, # 250 GB (SAN)
{'id': 1963}, # Private 2 x 2.0 GHz Cores
{'id': 21}, # 1 IP Address
{'id': 55}, # Host Ping
{'id': 58}, # Automated Notification
{'id': 1800}, # 0 GB Bandwidth
{'id': 57}, # Email and Ticket
{'id': 274}, # 1000 Mbps Public & Private Networks
{'id': 905}, # Reboot / Remote Console
{'id': 418}, # Nessus Vulnerability Assessment & Reporting
{'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account
],
}
}
class SoftLayerException(LibcloudError):
"""
Exception class for SoftLayer driver
"""
pass
class SoftLayerSafeTransport(xmlrpclib.SafeTransport):
pass
class SoftLayerTransport(xmlrpclib.Transport):
pass
class SoftLayerProxy(xmlrpclib.ServerProxy):
transportCls = (SoftLayerTransport, SoftLayerSafeTransport)
API_PREFIX = 'https://api.softlayer.com/xmlrpc/v3/'
def __init__(self, service, user_agent, verbose=0):
cls = self.transportCls[0]
if SoftLayerProxy.API_PREFIX[:8] == "https://":
cls = self.transportCls[1]
t = cls(use_datetime=0)
t.user_agent = user_agent
xmlrpclib.ServerProxy.__init__(
self,
uri="%s/%s" % (SoftLayerProxy.API_PREFIX, service),
transport=t,
verbose=verbose
)
class SoftLayerConnection(object):
"""
Connection class for the SoftLayer driver
"""
proxyCls = SoftLayerProxy
driver = None
def __init__(self, user, key):
self.user = user
self.key = key
self.ua = []
def request(self, service, method, *args, **kwargs):
sl = self.proxyCls(service, self._user_agent())
headers = {}
headers.update(self._get_auth_headers())
headers.update(self._get_init_params(service, kwargs.get('id')))
headers.update(self._get_object_mask(service, kwargs.get('object_mask')))
params = [{'headers': headers}] + list(args)
try:
return getattr(sl, method)(*params)
except xmlrpclib.Fault:
e = sys.exc_info()[1]
if e.faultCode == "SoftLayer_Account":
raise InvalidCredsError(e.faultString)
raise SoftLayerException(e)
def _user_agent(self):
return 'libcloud/%s (%s)%s' % (
libcloud.__version__,
self.driver.name,
"".join([" (%s)" % x for x in self.ua]))
def user_agent_append(self, s):
self.ua.append(s)
def _get_auth_headers(self):
return {
'authenticate': {
'username': self.user,
'apiKey': self.key
}
}
def _get_init_params(self, service, id):
if id is not None:
return {
'%sInitParameters' % service: {'id': id}
}
else:
return {}
def _get_object_mask(self, service, mask):
if mask is not None:
return {
'%sObjectMask' % service: {'mask': mask}
}
else:
return {}
class SoftLayerNodeDriver(NodeDriver):
"""
SoftLayer node driver
Extra node attributes:
- password: root password
- hourlyRecurringFee: hourly price (if applicable)
- recurringFee : flat rate (if applicable)
- recurringMonths : The number of months in which the recurringFee will be incurred.
"""
connectionCls = SoftLayerConnection
name = 'SoftLayer'
type = Provider.SOFTLAYER
features = {"create_node": ["generates_password"]}
def __init__(self, key, secret=None, secure=False):
self.key = key
self.secret = secret
self.connection = self.connectionCls(key, secret)
self.connection.driver = self
def _to_node(self, host):
try:
password = host['softwareComponents'][0]['passwords'][0]['password']
except (IndexError, KeyError):
password = None
hourlyRecurringFee = host.get('billingItem', {}).get('hourlyRecurringFee', 0)
recurringFee = host.get('billingItem', {}).get('recurringFee', 0)
recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0)
return Node(
id=host['id'],
name=host['hostname'],
state=NODE_STATE_MAP.get(
host['powerState']['keyName'],
NodeState.UNKNOWN
),
public_ips=[host['primaryIpAddress']],
private_ips=[host['primaryBackendIpAddress']],
driver=self,
extra={
'password': password,
'hourlyRecurringFee': hourlyRecurringFee,
'recurringFee': recurringFee,
'recurringMonths': recurringMonths,
}
)
def _to_nodes(self, hosts):
return [self._to_node(h) for h in hosts]
def destroy_node(self, node):
billing_item = self.connection.request(
"SoftLayer_Virtual_Guest",
"getBillingItem",
id=node.id
)
if billing_item:
res = self.connection.request(
"SoftLayer_Billing_Item",
"cancelService",
id=billing_item['id']
)
return res
else:
return False
def _get_order_information(self, order_id, timeout=1200, check_interval=5):
mask = {
'orderTopLevelItems': {
'billingItem': {
'resource': {
'softwareComponents': {
'passwords': ''
},
'powerState': '',
}
},
}
}
for i in range(0, timeout, check_interval):
try:
res = self.connection.request(
"SoftLayer_Billing_Order",
"getObject",
id=order_id,
object_mask=mask
)
item = res['orderTopLevelItems'][0]['billingItem']['resource']
if item['softwareComponents'][0]['passwords']:
return item
except (KeyError, IndexError):
pass
time.sleep(check_interval)
return None
def create_node(self, **kwargs):
"""Create a new SoftLayer node
See L{NodeDriver.create_node} for more keyword args.
@keyword ex_domain: e.g. libcloud.org
@type ex_domain: C{string}
"""
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
domain = kwargs.get('ex_domain')
location = kwargs['location']
if domain == None:
if name.find(".") != -1:
domain = name[name.find('.')+1:]
if domain == None:
# TODO: domain is a required argument for the SoftLayer API, but it
# shouldn't be.
domain = "example.com"
res = {'prices': SL_TEMPLATES[size.id]['prices']}
res['packageId'] = DEFAULT_PACKAGE
res['prices'].append({'id': image.id}) # Add OS to order
res['location'] = location.id
res['complexType'] = 'SoftLayer_Container_Product_Order_Virtual_Guest'
res['quantity'] = 1
res['useHourlyPricing'] = True
res['virtualGuests'] = [
{
'hostname': name,
'domain': domain
}
]
res = self.connection.request(
"SoftLayer_Product_Order",
"placeOrder",
res
)
order_id = res['orderId']
raw_node = self._get_order_information(order_id)
return self._to_node(raw_node)
def _to_image(self, img):
return NodeImage(
id=img['id'],
name=img['name'],
driver=self.connection.driver
)
def list_images(self, location=None):
return [self._to_image(i) for i in SL_IMAGES]
def _to_size(self, id, size):
return NodeSize(
id=id,
name=size['name'],
ram=size['ram'],
disk=size['disk'],
bandwidth=size['bandwidth'],
price=None,
driver=self.connection.driver,
)
def list_sizes(self, location=None):
return [self._to_size(id, s['imagedata']) for id, s in
list(SL_TEMPLATES.items())]
def _to_loc(self, loc):
return NodeLocation(
id=loc['id'],
name=loc['name'],
country=DATACENTERS[loc['name']]['country'],
driver=self
)
def list_locations(self):
res = self.connection.request(
"SoftLayer_Location_Datacenter",
"getDatacenters"
)
# checking "in DATACENTERS", because some of the locations returned by getDatacenters are not useable.
return [self._to_loc(l) for l in res if l['name'] in DATACENTERS]
def list_nodes(self):
mask = {
'virtualGuests': {
'powerState': '',
'softwareComponents': {
'passwords': ''
},
'billingItem': '',
},
}
res = self.connection.request(
"SoftLayer_Account",
"getVirtualGuests",
object_mask=mask
)
nodes = self._to_nodes(res)
return nodes
def reboot_node(self, node):
res = self.connection.request(
"SoftLayer_Virtual_Guest",
"rebootHard",
id=node.id
)
return res
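# Illustrative usage sketch (not part of the original driver): listing
# inventory and creating a node. The credentials, hostname and the choice of
# the first image/size/location are placeholders for demonstration only.
def _example_create_node(user, api_key):
    driver = SoftLayerNodeDriver(user, api_key)
    image = driver.list_images()[0]
    size = driver.list_sizes()[0]
    location = driver.list_locations()[0]
    return driver.create_node(name='node1.example.com', image=image,
                              size=size, location=location)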
| 33.800895 | 110 | 0.551923 |
11ca3433648652b74a631296db32a4b9a39b19fc | 14,869 | py | Python | matrix/common/etl/transformers/cell_expression.py | ambrosejcarr/matrix-service | f61252d79941fa962240e27062682c9676f07e95 | ["MIT"] | null | null | null | matrix/common/etl/transformers/cell_expression.py | ambrosejcarr/matrix-service | f61252d79941fa962240e27062682c9676f07e95 | ["MIT"] | null | null | null | matrix/common/etl/transformers/cell_expression.py | ambrosejcarr/matrix-service | f61252d79941fa962240e27062682c9676f07e95 | ["MIT"] | null | null | null |
import csv
import glob
import gzip
import hashlib
import json
import os
import pathlib
import typing
import numpy
import scipy.io
import zarr
from . import MetadataToPsvTransformer
from matrix.common.aws.redshift_handler import TableName
from matrix.common.etl.dcp_zarr_store import DCPZarrStore
from matrix.common.exceptions import MatrixException
from matrix.common.logging import Logging
logger = Logging.get_logger(__name__)
class CellExpressionTransformer(MetadataToPsvTransformer):
"""Reads SS2 and 10X bundles and writes out rows for expression and cell tables in PSV format."""
# The minimum UMI count in the emptydrops result required to include a
# putative cell in the matrix service.
emptydrops_min_count = 100
def __init__(self, staging_dir):
super(CellExpressionTransformer, self).__init__(staging_dir)
def _write_rows_to_psvs(self, *args: typing.Tuple):
for arg in args:
table = arg[0]
rows = arg[1]
bundle_dir = arg[2]
out_dir = os.path.join(self.output_dir, table.value)
os.makedirs(out_dir, exist_ok=True)
out_file_path = os.path.join(
self.output_dir,
table.value,
f"{os.path.split(os.path.normpath(bundle_dir))[-1]}.{table.value}.data.gz")
with gzip.open(out_file_path, 'w') as out_file:
out_file.writelines((row.encode() for row in rows))
def _parse_from_metadatas(self, bundle_dir, bundle_manifest_path):
protocol_id = json.load(
open(os.path.join(bundle_dir, "analysis_protocol_0.json")))['protocol_core']['protocol_id']
if protocol_id.startswith("smartseq2"):
cell_lines, expression_lines = self._parse_ss2_bundle(bundle_dir, bundle_manifest_path)
elif protocol_id.startswith("optimus"):
cell_lines, expression_lines = self._parse_optimus_bundle(bundle_dir, bundle_manifest_path)
elif protocol_id.startswith("cellranger"):
cell_lines, expression_lines = self._parse_cellranger_bundle(bundle_dir, bundle_manifest_path)
else:
raise MatrixException(400, f"Failed to parse cell and expression metadata. "
f"Unsupported analysis protocol {protocol_id}.")
return (TableName.CELL, cell_lines, bundle_dir), (TableName.EXPRESSION, expression_lines, bundle_dir)
def _parse_ss2_bundle(self, bundle_dir, bundle_manifest_path):
"""
Parses SS2 analysis files into PSV rows for cell and expression Redshift tables.
"""
# Get the keys associated with this cell, except for cellkey
keys = self._parse_keys(bundle_dir)
cell_key = json.load(open(
os.path.join(bundle_dir, "cell_suspension_0.json")))['provenance']['document_id']
# Read in isoform and gene expression values
isoforms_path = glob.glob(os.path.join(bundle_dir, "*.isoforms.results"))[0]
isoforms_values = {}
with open(isoforms_path) as iso_file:
reader = csv.DictReader(iso_file, delimiter='\t')
for row in reader:
transcript_id = row['transcript_id'].split('.')[0]
isoforms_values[transcript_id] = {
'TPM': float(row['TPM']) + isoforms_values.get(transcript_id, {}).get('TPM', 0),
'Count': float(row['expected_count']) + isoforms_values.get(transcript_id, {}).get('Count', 0)
}
genes_path = glob.glob(os.path.join(bundle_dir, "*.genes.results"))[0]
genes_values = {}
with open(genes_path) as genes_file:
reader = csv.DictReader(genes_file, delimiter='\t')
for row in reader:
gene_id = row['gene_id'].split('.')[0]
genes_values[gene_id] = {
'TPM': float(row['TPM']) + genes_values.get(gene_id, {}).get('TPM', 0),
'Count': float(row['expected_count']) + genes_values.get(gene_id, {}).get('Count', 0)
}
genes_detected = sum((1 for k in genes_values.values() if k["Count"] > 0))
file_uuid = [f for f in json.load(open(bundle_manifest_path))["files"]
if f["name"].endswith(".genes.results")][0]["uuid"]
file_version = [f for f in json.load(open(bundle_manifest_path))["files"]
if f["name"].endswith(".genes.results")][0]["version"]
cell_lines = ['|'.join([
cell_key,
cell_key,
keys["project_key"],
keys["specimen_key"],
keys["library_key"],
keys["analysis_key"],
file_uuid,
file_version,
"",
str(genes_detected),
"",
""]) + '\n']
expression_lines = []
for transcript_id, expr_values in isoforms_values.items():
if expr_values["Count"] == 0:
continue
for expr_type in ["TPM", "Count"]:
expression_line = '|'.join(
[cell_key,
transcript_id,
expr_type,
str(expr_values[expr_type])]) + '\n'
expression_lines.append(expression_line)
for gene_id, expr_values in genes_values.items():
if expr_values["Count"] == 0:
continue
for expr_type in ["TPM", "Count"]:
expression_line = '|'.join(
[cell_key,
gene_id,
expr_type,
str(expr_values[expr_type])]) + '\n'
expression_lines.append(expression_line)
return cell_lines, expression_lines
def _parse_cellranger_bundle(self, bundle_dir, bundle_manifest_path):
"""
Parses cellranger analysis files into PSV rows for cell and expression Redshift tables.
"""
keys = self._parse_keys(bundle_dir)
cell_suspension_id = json.load(open(
os.path.join(bundle_dir, "cell_suspension_0.json")))['provenance']['document_id']
matrix = scipy.io.mmread(os.path.join(bundle_dir, "matrix.mtx"))
genes = [g.split("\t")[0].split(".", 1)[0] for g in
open(os.path.join(bundle_dir, "genes.tsv")).readlines()]
barcodes = [b.strip() for b in open(os.path.join(bundle_dir, "barcodes.tsv")).readlines()]
# columns are cells, rows are genes
expression_lines = []
cell_lines = set()
cell_gene_counts = {}
cell_to_barcode = {}
for i, j, v in zip(matrix.row, matrix.col, matrix.data):
barcode = barcodes[j]
gene = genes[i]
# Just make up a cell id
bundle_uuid = pathlib.Path(bundle_dir).parts[-1]
cell_key = self._generate_10x_cell_key(bundle_uuid, barcode)
cell_to_barcode[cell_key] = barcode
if cell_key not in cell_gene_counts:
cell_gene_counts[cell_key] = {}
cell_gene_counts[cell_key][gene] = cell_gene_counts[cell_key].get(gene, 0) + v
file_uuid = [f for f in json.load(open(bundle_manifest_path))["files"]
if f["name"].endswith("matrix.mtx")][0]["uuid"]
file_version = [f for f in json.load(open(bundle_manifest_path))["files"]
if f["name"].endswith("matrix.mtx")][0]["version"]
for cell_key, gene_count_dict in cell_gene_counts.items():
for gene, count in gene_count_dict.items():
expression_line = '|'.join(
[cell_key,
gene,
"Count",
str(count)]) + '\n'
expression_lines.append(expression_line)
gene_count = len(gene_count_dict)
cell_line = '|'.join(
[cell_key,
cell_suspension_id,
keys["project_key"],
keys["specimen_key"],
keys["library_key"],
keys["analysis_key"],
file_uuid,
file_version,
cell_to_barcode[cell_key],
str(gene_count),
"",
""]) + '\n'
cell_lines.add(cell_line)
return cell_lines, expression_lines
def _parse_optimus_bundle(self, bundle_dir, bundle_manifest_path):
"""
Parses optimus analysis files into PSV rows for cell and expression Redshift tables.
"""
keys = self._parse_keys(bundle_dir)
file_uuid = [f for f in json.load(open(bundle_manifest_path))["files"]
if f["name"].endswith(".zattrs")][0]["uuid"]
file_version = [f for f in json.load(open(bundle_manifest_path))["files"]
if f["name"].endswith(".zattrs")][0]["version"]
emptydrops_result = {}
with open(os.path.join(bundle_dir, "empty_drops_result.csv")) as emptydrops_file:
reader = csv.DictReader(emptydrops_file)
for row in reader:
emptydrops_result[row["CellId"]] = {"total_umi_count": int(row["Total"]),
"is_cell": row["IsCell"] == "TRUE"}
# read expression matrix from zarr
store = DCPZarrStore(bundle_dir=bundle_dir)
root = zarr.group(store=store)
n_cells = root.expression_matrix.cell_id.shape[0]
chunk_size = root.expression_matrix.cell_id.chunks[0]
n_chunks = root.expression_matrix.cell_id.nchunks
cell_lines = set()
expression_lines = []
logger.info(f"Optimus bundle has {n_cells} cells and {n_chunks} chunks.")
for i in range(n_chunks):
self._parse_optimus_chunk(
keys=keys,
file_uuid=file_uuid,
file_version=file_version,
root=root,
start_row=chunk_size * i,
end_row=(i + 1) * chunk_size if (i + 1) * chunk_size < n_cells else n_cells,
cell_lines=cell_lines,
expression_lines=expression_lines,
emptydrops_result=emptydrops_result
)
return cell_lines, expression_lines
def _parse_optimus_chunk(self,
keys: dict,
file_uuid: str,
file_version: str,
root: zarr.Group,
start_row: int,
end_row: int,
cell_lines: set,
expression_lines: list,
emptydrops_result: dict):
"""
Parses a chunk of a zarr group containing an expression matrix into cell and expression PSV lines.
Modifies cell_lines and expression_lines.
:param keys: Metadata keys generated by _parse_keys
:param file_uuid: UUID of the file used for joining with rest of HCA metadata
:param file_version: Version of the file used for joining with rest of HCA metadata
:param root: Zarr group of the full expression matrix
:param start_row: Start row of the chunk
:param end_row: End row of the chunk
:param cell_lines: Output cell PSV lines
:param expression_lines: Output expression PSV lines
:param emptydrops_result: Dict from cell barcode to UMI count and emptydrops call
"""
logger.info(f"Parsing rows {start_row} to {end_row}.")
chunk_size = end_row - start_row
expr_values = root.expression_matrix.expression[start_row:end_row]
barcodes = root.expression_matrix.cell_id[start_row:end_row]
gene_ids = numpy.array([g.split(".")[0] for g in root.expression_matrix.gene_id])
for i in range(chunk_size):
if emptydrops_result[barcodes[i]]["total_umi_count"] < self.emptydrops_min_count:
continue
cell_key = self._generate_10x_cell_key(keys["bundle_uuid"], barcodes[i])
gene_count = numpy.count_nonzero(expr_values[i])
cell_line = '|'.join(
[cell_key,
keys["cell_suspension_key"],
keys["project_key"],
keys["specimen_key"],
keys["library_key"],
keys["analysis_key"],
file_uuid,
file_version,
barcodes[i],
str(gene_count),
str(emptydrops_result[barcodes[i]]["total_umi_count"]),
't' if emptydrops_result[barcodes[i]]["is_cell"] else 'f']
) + '\n'
cell_lines.add(cell_line)
cell_expr_values = expr_values[i]
nonzero_gene_ids = gene_ids[cell_expr_values != 0]
nonzero_cevs = cell_expr_values[cell_expr_values != 0]
for j in range(nonzero_gene_ids.shape[0]):
expression_line = '|'.join(
[cell_key,
nonzero_gene_ids[j],
"Count",
str(nonzero_cevs[j])]
) + '\n'
expression_lines.append(expression_line)
def _generate_10x_cell_key(self, bundle_uuid, barcode):
"""
Generate a unique hash for a cell.
:param bundle_uuid: Bundle UUID the cell belongs to
:param barcode: 10X cell barcode
:return: MD5 hash
"""
h = hashlib.md5()
h.update(bundle_uuid.encode())
h.update(barcode.encode())
return h.hexdigest()
def _parse_keys(self, bundle_dir):
p = pathlib.Path(bundle_dir)
bundle_uuid = pathlib.Path(bundle_dir).parts[-1]
cs_path = p.joinpath("cell_suspension_0.json")
cs_key = json.load(open(cs_path))['provenance']['document_id']
project_path = p.joinpath("project_0.json")
project_key = json.load(open(project_path))["provenance"]["document_id"]
ap_path = p.joinpath("analysis_protocol_0.json")
ap_key = json.load(open(ap_path))["provenance"]["document_id"]
specimen_paths = list(p.glob("specimen_from_organism_*.json"))
specimen_keys = [json.load(open(p))['provenance']['document_id'] for p in specimen_paths]
specimen_key = sorted(specimen_keys)[0]
library_paths = list(p.glob("library_preparation_protocol_*.json"))
library_keys = [json.load(open(p))['provenance']['document_id'] for p in library_paths]
library_key = sorted(library_keys)[0]
return {
"bundle_uuid": bundle_uuid,
"cell_suspension_key": cs_key,
"project_key": project_key,
"specimen_key": specimen_key,
"library_key": library_key,
"analysis_key": ap_key
}
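# Illustrative sketch (not part of the original transformer): the 10x cell key
# used above is a plain MD5 over bundle UUID + barcode, so the same pair always
# maps to the same key. The UUID and barcode defaults are made-up placeholders.
def _example_cell_key(bundle_uuid="0c5d7d68-6f8f-4a3a-8d2a-cb33d3e81579",
                      barcode="AAACCTGAGAAACCAT-1"):
    h = hashlib.md5()
    h.update(bundle_uuid.encode())
    h.update(barcode.encode())
    return h.hexdigest()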
| 41.766854 | 114 | 0.579057 |
fba6143b49d5a5da2085ca9b92b550ec03205161 | 3,208 | py | Python | task1/multiboxloss.py | manhph2211/Receipt-data-extraction | bd9ea74fbe8b8cbeddf201c7ea05d9a85cd38ac5 | ["MIT"] | 3 | 2021-02-04T13:25:51.000Z | 2021-08-18T02:15:46.000Z | task1/multiboxloss.py | manhph2211/Receipt-data-extraction | bd9ea74fbe8b8cbeddf201c7ea05d9a85cd38ac5 | ["MIT"] | null | null | null | task1/multiboxloss.py | manhph2211/Receipt-data-extraction | bd9ea74fbe8b8cbeddf201c7ea05d9a85cd38ac5 | ["MIT"] | null | null | null |
# Jaccard:
# Hard negative mining: number of negative default boxes = 3 times the number of positive default boxes
# Loss in regression task: MSE -> F.SmoothL1Loss
# Loss in classification (multi class): F.CrossEntropy
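# Putting the pieces together, the objective is the usual SSD-style loss
# (a rough sketch, not an exact transcription of the code below):
#     L = (1 / N_pos) * (L_conf + L_loc)
# where L_loc is Smooth-L1 over the positive (matched) default boxes only and
# L_conf is cross-entropy over the positives plus the hardest negatives,
# selected at a 3:1 negative-to-positive ratio.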
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import match
class MultiBoxLoss(nn.Module):
def __init__(self, jaccard_threshold=0.5, neg_pos=3, device="cpu"):# means negative_defautbox_num=neg_pos * positive_one
super(MultiBoxLoss, self).__init__()
self.jaccard_threshold = jaccard_threshold
self.neg_pos = neg_pos
self.device = device
def forward(self, predictions, targets):
loc_data, conf_data, dbox_list = predictions
# (batch_num, num_dbox, num_classes) -- loc_data
num_batch = loc_data.size(0)
num_dbox = loc_data.size(1) # 8732
num_classes = conf_data.size(2)
# make 2 empty tensors :)
conf_t_label = torch.LongTensor(num_batch, num_dbox).to(self.device)
loc_t = torch.Tensor(num_batch, num_dbox, 4).to(self.device) # every dbox has 4 (x_min...,y_max)
for idx in range(num_batch):
truths = targets[idx][:, :-1].to(self.device) # (xmin, ymin, xmax, ymax) BBox
labels = targets[idx][:, -1].to(self.device) # label
dbox = dbox_list.to(self.device)
variances = [0.1, 0.2]
match(self.jaccard_threshold, truths, dbox, variances, labels, loc_t, conf_t_label, idx) #--> conf_t_label
# SmoothL1Loss
pos_mask = conf_t_label > 0 # positive
# loc_data(num_batch, 8732, 4)
pos_idx = pos_mask.unsqueeze(pos_mask.dim()).expand_as(loc_data)
# positive dbox, loc_data
loc_p = loc_data[pos_idx].view(-1, 4)
#print(loc_p.shape)
loc_t = loc_t[pos_idx].view(-1, 4)
#print(loc_t.shape)
loss_loc = F.smooth_l1_loss(loc_p, loc_t, reduction="sum")
# loss_conf1
# CrossEntropy
batch_conf = conf_data.view(-1, num_classes) # (num_batch*num_box, num_classes)
loss_conf = F.cross_entropy(batch_conf, conf_t_label.view(-1), reduction="none")
# hard negative mining
num_pos = pos_mask.long().sum(1, keepdim=True)
loss_conf = loss_conf.view(num_batch, -1) # torch.size([num_batch, 8732])
_, loss_idx = loss_conf.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
# idx_rank gives each default box's rank when the losses are sorted in descending order
num_neg = torch.clamp(num_pos * self.neg_pos, max=num_dbox)
neg_mask = idx_rank < (num_neg).expand_as(idx_rank)
# (num_batch, 8732) -> (num_batch, 8732, 2)
pos_idx_mask = pos_mask.unsqueeze(2).expand_as(conf_data)
neg_idx_mask = neg_mask.unsqueeze(2).expand_as(conf_data)
conf_t_pre = conf_data[(pos_idx_mask + neg_idx_mask).gt(0)].view(-1, num_classes)
conf_t_label_ = conf_t_label[(pos_mask + neg_mask).gt(0)]
loss_conf = F.cross_entropy(conf_t_pre, conf_t_label_, reduction="sum")
# total loss = loss_loc + loss_conf
N = num_pos.sum()
loss_loc = loss_loc / N
loss_conf = loss_conf / N
return loss_loc, loss_conf
| 43.945205 | 125 | 0.649938 |
aa498abe99973fbe01588788e477d78243873a9f | 5,518 | py | Python | contrib/seeds/makeseeds.py | emberce/Phore | ff388a81e8c9546eb01ab754ada86e560c63ea6e | ["MIT"] | 1 | 2018-12-02T18:47:54.000Z | 2018-12-02T18:47:54.000Z | contrib/seeds/makeseeds.py | emberce/Phore | ff388a81e8c9546eb01ab754ada86e560c63ea6e | ["MIT"] | 1 | 2018-06-17T19:33:23.000Z | 2018-06-17T19:33:23.000Z | contrib/seeds/makeseeds.py | emberce/Phore | ff388a81e8c9546eb01ab754ada86e560c63ea6e | ["MIT"] | 2 | 2018-04-01T03:11:02.000Z | 2018-05-23T17:27:52.000Z |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/AtheneumCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
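# Illustrative usage (assumed filenames): pipe the DNS seeder's dump in on
# stdin and capture the filtered seed list on stdout, e.g.
#   python3 makeseeds.py < seeds_main.txt > nodes_main.txt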
| 32.081395 | 186 | 0.567235 |
84209ca2520b89d631856afb67be5aa4a2cd16a4 | 55,071 | py | Python | Lib/test/test_concurrent_futures.py | Horcruxes/cpython | 1cbaa505d007e11c4a1f0d2073d72b6c02c7147c | ["0BSD"] | 33 | 2021-07-25T14:23:35.000Z | 2022-03-31T00:17:30.000Z | Lib/test/test_concurrent_futures.py | Horcruxes/cpython | 1cbaa505d007e11c4a1f0d2073d72b6c02c7147c | ["0BSD"] | 148 | 2020-02-26T01:08:34.000Z | 2022-03-01T15:00:59.000Z | Lib/test/test_concurrent_futures.py | Horcruxes/cpython | 1cbaa505d007e11c4a1f0d2073d72b6c02c7147c | ["0BSD"] | 3 | 2021-09-30T11:22:32.000Z | 2022-02-17T11:19:14.000Z |
from test import support
from test.support import import_helper
from test.support import threading_helper
# Skip tests if _multiprocessing wasn't built.
import_helper.import_module('_multiprocessing')
from test.support import hashlib_helper
from test.support.script_helper import assert_python_ok
import contextlib
import itertools
import logging
from logging.handlers import QueueHandler
import os
import queue
import sys
import threading
import time
import unittest
import weakref
from pickle import PicklingError
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
BrokenExecutor)
from concurrent.futures.process import BrokenProcessPool, _check_system_limits
from multiprocessing import get_context
import multiprocessing.process
import multiprocessing.util
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
INITIALIZER_STATUS = 'uninitialized'
def mul(x, y):
return x * y
def capture(*args, **kwargs):
return args, kwargs
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
def init(x):
global INITIALIZER_STATUS
INITIALIZER_STATUS = x
def get_init_status():
return INITIALIZER_STATUS
def init_fail(log_queue=None):
if log_queue is not None:
logger = logging.getLogger('concurrent.futures')
logger.addHandler(QueueHandler(log_queue))
logger.setLevel('CRITICAL')
logger.propagate = False
time.sleep(0.1) # let some futures be scheduled
raise ValueError('error in initializer')
class MyObject(object):
def my_method(self):
pass
class EventfulGCObj():
def __init__(self, mgr):
self.event = mgr.Event()
def __del__(self):
self.event.set()
def make_dummy_object(_):
return MyObject()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._thread_key = threading_helper.threading_setup()
def tearDown(self):
support.reap_children()
threading_helper.threading_cleanup(*self._thread_key)
class ExecutorMixin:
worker_count = 5
executor_kwargs = {}
def setUp(self):
super().setUp()
self.t1 = time.monotonic()
if hasattr(self, "ctx"):
self.executor = self.executor_type(
max_workers=self.worker_count,
mp_context=self.get_context(),
**self.executor_kwargs)
else:
self.executor = self.executor_type(
max_workers=self.worker_count,
**self.executor_kwargs)
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
self.executor = None
dt = time.monotonic() - self.t1
if support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 300, "synchronization issue: test lasted too long")
super().tearDown()
def get_context(self):
return get_context(self.ctx)
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolForkMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "fork"
def get_context(self):
try:
_check_system_limits()
except NotImplementedError:
self.skipTest("ProcessPoolExecutor unavailable on this system")
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
class ProcessPoolSpawnMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "spawn"
def get_context(self):
try:
_check_system_limits()
except NotImplementedError:
self.skipTest("ProcessPoolExecutor unavailable on this system")
return super().get_context()
class ProcessPoolForkserverMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "forkserver"
def get_context(self):
try:
_check_system_limits()
except NotImplementedError:
self.skipTest("ProcessPoolExecutor unavailable on this system")
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
def create_executor_tests(mixin, bases=(BaseTestCase,),
executor_mixins=(ThreadPoolMixin,
ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin)):
def strip_mixin(name):
if name.endswith(('Mixin', 'Tests')):
return name[:-5]
elif name.endswith('Test'):
return name[:-4]
else:
return name
for exe in executor_mixins:
name = ("%s%sTest"
% (strip_mixin(exe.__name__), strip_mixin(mixin.__name__)))
cls = type(name, (mixin,) + (exe,) + bases, {})
globals()[name] = cls
class InitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
global INITIALIZER_STATUS
INITIALIZER_STATUS = 'uninitialized'
self.executor_kwargs = dict(initializer=init,
initargs=('initialized',))
super().setUp()
def test_initializer(self):
futures = [self.executor.submit(get_init_status)
for _ in range(self.worker_count)]
for f in futures:
self.assertEqual(f.result(), 'initialized')
class FailingInitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
if hasattr(self, "ctx"):
# Pass a queue to redirect the child's logging output
self.mp_context = self.get_context()
self.log_queue = self.mp_context.Queue()
self.executor_kwargs = dict(initializer=init_fail,
initargs=(self.log_queue,))
else:
# In a thread pool, the child shares our logging setup
# (see _assert_logged())
self.mp_context = None
self.log_queue = None
self.executor_kwargs = dict(initializer=init_fail)
super().setUp()
def test_initializer(self):
with self._assert_logged('ValueError: error in initializer'):
try:
future = self.executor.submit(get_init_status)
except BrokenExecutor:
# Perhaps the executor is already broken
pass
else:
with self.assertRaises(BrokenExecutor):
future.result()
# At some point, the executor should break
t1 = time.monotonic()
while not self.executor._broken:
if time.monotonic() - t1 > 5:
self.fail("executor not broken after 5 s.")
time.sleep(0.01)
# ... and from this point submit() is guaranteed to fail
with self.assertRaises(BrokenExecutor):
self.executor.submit(get_init_status)
def _prime_executor(self):
pass
@contextlib.contextmanager
def _assert_logged(self, msg):
if self.log_queue is not None:
yield
output = []
try:
while True:
output.append(self.log_queue.get_nowait().getMessage())
except queue.Empty:
pass
else:
with self.assertLogs('concurrent.futures', 'CRITICAL') as cm:
yield
output = cm.output
self.assertTrue(any(msg in line for line in output),
output)
create_executor_tests(InitializerMixin)
create_executor_tests(FailingInitializerMixin)
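# Illustrative note: the two calls above synthesize concrete test classes into
# module globals by combining the executor mixin name with the mixin name plus
# "Test", e.g. ThreadPoolInitializerTest and ProcessPoolForkInitializerTest.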
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
if __name__ == "__main__":
context = '{context}'
if context == "":
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_submit_after_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
import atexit
@atexit.register
def run_last():
try:
t.submit(id, None)
except RuntimeError:
print("runtime-error")
raise
from concurrent.futures import {executor_type}
if __name__ == "__main__":
context = '{context}'
if not context:
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(id, 42).result()
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertIn("RuntimeError: cannot schedule new futures", err.decode())
self.assertEqual(out.strip(), b"runtime-error")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
def test_cancel_futures(self):
executor = self.executor_type(max_workers=3)
fs = [executor.submit(time.sleep, .1) for _ in range(50)]
executor.shutdown(cancel_futures=True)
# We can't guarantee the exact number of cancellations, but we can
# guarantee that *some* were cancelled. With setting max_workers to 3,
# most of the submitted futures should have been cancelled.
cancelled = [fut for fut in fs if fut.cancelled()]
self.assertTrue(len(cancelled) >= 35, msg=f"{len(cancelled)=}")
# Ensure the other futures were able to finish.
# Use "not fut.cancelled()" instead of "fut.done()" to include futures
# that may have been left in a pending state.
others = [fut for fut in fs if not fut.cancelled()]
for fut in others:
self.assertTrue(fut.done(), msg=f"{fut._state=}")
self.assertIsNone(fut.exception())
# Similar to the number of cancelled futures, we can't guarantee the
# exact number that completed. But, we can guarantee that at least
# one finished.
self.assertTrue(len(others) > 0, msg=f"{len(others)=}")
def test_hang_issue39205(self):
"""shutdown(wait=False) doesn't hang at exit with running futures.
See https://bugs.python.org/issue39205.
"""
if self.executor_type == futures.ProcessPoolExecutor:
raise unittest.SkipTest(
"Hangs due to https://bugs.python.org/issue39205")
rc, out, err = assert_python_ok('-c', """if True:
from concurrent.futures import {executor_type}
from test.test_concurrent_futures import sleep_and_print
if __name__ == "__main__":
t = {executor_type}(max_workers=3)
t.submit(sleep_and_print, 1.0, "apple")
t.shutdown(wait=False)
""".format(executor_type=self.executor_type.__name__))
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(3):
self.executor.submit(acquire_lock, sem)
self.assertEqual(len(self.executor._threads), 3)
for i in range(3):
sem.release()
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
res = executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
# Make sure the results were all computed before the
# executor got shutdown.
assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])
def test_shutdown_no_wait(self):
# Ensure that the executor cleans up the threads when calling
# shutdown with wait=False
executor = futures.ThreadPoolExecutor(max_workers=5)
res = executor.map(abs, range(-5, 5))
threads = executor._threads
executor.shutdown(wait=False)
for t in threads:
t.join()
# Make sure the results were all computed before the
# executor got shutdown.
assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
support.gc_collect() # For PyPy or other GCs.
for t in threads:
self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
t.join()
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
support.gc_collect() # For PyPy or other GCs.
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join()
def test_cancel_futures_wait_false(self):
# Can only be reliably tested for TPE, since PPE often hangs with
# `wait=False` (even without *cancel_futures*).
rc, out, err = assert_python_ok('-c', """if True:
from concurrent.futures import ThreadPoolExecutor
from test.test_concurrent_futures import sleep_and_print
if __name__ == "__main__":
t = ThreadPoolExecutor()
t.submit(sleep_and_print, .1, "apple")
t.shutdown(wait=False, cancel_futures=True)
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
class ProcessPoolShutdownTest(ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
def acquire_lock(lock):
lock.acquire()
mp_context = get_context()
sem = mp_context.Semaphore(0)
for _ in range(3):
self.executor.submit(acquire_lock, sem)
self.assertEqual(len(self.executor._processes), 3)
for _ in range(3):
sem.release()
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
res = executor.map(abs, range(-5, 5))
executor_manager_thread = executor._executor_manager_thread
processes = executor._processes
call_queue = executor._call_queue
executor_manager_thread = executor._executor_manager_thread
del executor
support.gc_collect() # For PyPy or other GCs.
# Make sure that all the executor resources were properly cleaned by
# the shutdown process
executor_manager_thread.join()
for p in processes.values():
p.join()
call_queue.join_thread()
# Make sure the results were all computed before the
# executor got shutdown.
assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])
def test_shutdown_no_wait(self):
# Ensure that the executor cleans up the processes when calling
# shutdown with wait=False
executor = futures.ProcessPoolExecutor(max_workers=5)
res = executor.map(abs, range(-5, 5))
processes = executor._processes
call_queue = executor._call_queue
executor_manager_thread = executor._executor_manager_thread
executor.shutdown(wait=False)
# Make sure that all the executor resources were properly cleaned by
# the shutdown process
executor_manager_thread.join()
for p in processes.values():
p.join()
call_queue.join_thread()
# Make sure the results were all computed before the executor got
# shutdown.
assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])
create_executor_tests(ProcessPoolShutdownTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
create_executor_tests(WaitTests,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class AsCompletedTests:
# TODO([email protected]): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
# Issue #31641: accept arbitrary iterables.
future1 = self.executor.submit(time.sleep, 2)
completed = [
f for f in futures.as_completed(itertools.repeat(future1, 3))
]
self.assertEqual(len(completed), 1)
def test_free_reference_yielded_future(self):
# Issue #14406: Generator should not keep references
# to finished futures.
futures_list = [Future() for _ in range(8)]
futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED))
futures_list.append(create_future(state=FINISHED, result=42))
with self.assertRaises(futures.TimeoutError):
for future in futures.as_completed(futures_list, timeout=0):
futures_list.remove(future)
wr = weakref.ref(future)
del future
support.gc_collect() # For PyPy or other GCs.
self.assertIsNone(wr())
futures_list[0].set_result("test")
for future in futures.as_completed(futures_list):
futures_list.remove(future)
wr = weakref.ref(future)
del future
support.gc_collect() # For PyPy or other GCs.
self.assertIsNone(wr())
if futures_list:
futures_list[0].set_result("test")
def test_correct_timeout_exception_msg(self):
futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE,
RUNNING_FUTURE, SUCCESSFUL_FUTURE]
with self.assertRaises(futures.TimeoutError) as cm:
list(futures.as_completed(futures_list, timeout=0))
self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished')
create_executor_tests(AsCompletedTests)
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
future = self.executor.submit(capture, 1, self=2, fn=3)
self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3}))
with self.assertRaises(TypeError):
self.executor.submit(fn=capture, arg=1)
with self.assertRaises(TypeError):
self.executor.submit(arg=1)
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
self.assertEqual(
list(self.executor.map(pow, range(10), range(10), chunksize=3)),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
def test_free_reference(self):
# Issue #14406: Result iterator should not keep an internal
# reference to result objects.
for obj in self.executor.map(make_dummy_object, range(10)):
wr = weakref.ref(obj)
del obj
support.gc_collect() # For PyPy or other GCs.
self.assertIsNone(wr())
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
expected = min(32, (os.cpu_count() or 1) + 4)
self.assertEqual(executor._max_workers, expected)
def test_saturation(self):
executor = self.executor_type(4)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(15 * executor._max_workers):
executor.submit(acquire_lock, sem)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
sem.release()
executor.shutdown(wait=True)
def test_idle_thread_reuse(self):
executor = self.executor_type()
executor.submit(mul, 21, 2).result()
executor.submit(mul, 6, 7).result()
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._threads), 1)
executor.shutdown(wait=True)
@unittest.skipUnless(hasattr(os, 'register_at_fork'), 'need os.register_at_fork')
def test_hang_global_shutdown_lock(self):
# bpo-45021: _global_shutdown_lock should be reinitialized in the child
# process, otherwise it will never exit
def submit(pool):
pool.submit(submit, pool)
with futures.ThreadPoolExecutor(1) as pool:
pool.submit(submit, pool)
for _ in range(50):
with futures.ProcessPoolExecutor(1, mp_context=get_context('fork')) as workers:
workers.submit(tuple)
class ProcessPoolExecutorTest(ExecutorTest):
@unittest.skipUnless(sys.platform=='win32', 'Windows-only process limit')
def test_max_workers_too_large(self):
with self.assertRaisesRegex(ValueError,
"max_workers must be <= 61"):
futures.ProcessPoolExecutor(max_workers=62)
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
@hashlib_helper.requires_hashdigest('md5')
def test_ressources_gced_in_workers(self):
        # Ensure that the arguments for a job are correctly gc-ed after the
        # job is finished.
mgr = get_context(self.ctx).Manager()
obj = EventfulGCObj(mgr)
future = self.executor.submit(id, obj)
future.result()
self.assertTrue(obj.event.wait(timeout=1))
# explicitly destroy the object to ensure that EventfulGCObj.__del__()
# is called while manager is still running.
obj = None
support.gc_collect()
mgr.shutdown()
mgr.join()
def test_saturation(self):
executor = self.executor_type(4)
mp_context = get_context()
sem = mp_context.Semaphore(0)
job_count = 15 * executor._max_workers
try:
for _ in range(job_count):
executor.submit(sem.acquire)
self.assertEqual(len(executor._processes), executor._max_workers)
for _ in range(job_count):
sem.release()
finally:
executor.shutdown()
def test_idle_process_reuse_one(self):
executor = self.executor_type(4)
executor.submit(mul, 21, 2).result()
executor.submit(mul, 6, 7).result()
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._processes), 1)
executor.shutdown()
def test_idle_process_reuse_multiple(self):
executor = self.executor_type(4)
executor.submit(mul, 12, 7).result()
executor.submit(mul, 33, 25)
executor.submit(mul, 25, 26).result()
executor.submit(mul, 18, 29)
self.assertLessEqual(len(executor._processes), 2)
executor.shutdown()
create_executor_tests(ProcessPoolExecutorTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
def _crash(delay=None):
"""Induces a segfault."""
if delay:
time.sleep(delay)
import faulthandler
faulthandler.disable()
faulthandler._sigsegv()
def _exit():
"""Induces a sys exit with exitcode 1."""
sys.exit(1)
def _raise_error(Err):
"""Function that raises an Exception in process."""
raise Err()
def _raise_error_ignore_stderr(Err):
"""Function that raises an Exception in process and ignores stderr."""
import io
sys.stderr = io.StringIO()
raise Err()
def _return_instance(cls):
"""Function that returns a instance of cls."""
return cls()
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
_crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return _crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a process exit at pickling time."""
def __reduce__(self):
_exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return _exit, ()
class ErrorAtPickle(object):
"""Bad object that triggers an error at pickling time."""
def __reduce__(self):
from pickle import PicklingError
raise PicklingError("Error in pickle")
class ErrorAtUnpickle(object):
"""Bad object that triggers an error at unpickling time."""
def __reduce__(self):
from pickle import UnpicklingError
return _raise_error_ignore_stderr, (UnpicklingError, )
class ExecutorDeadlockTest:
TIMEOUT = support.SHORT_TIMEOUT
def _fail_on_deadlock(self, executor):
# If we did not recover before TIMEOUT seconds, consider that the
# executor is in a deadlock state and forcefully clean all its
        # components.
import faulthandler
from tempfile import TemporaryFile
with TemporaryFile(mode="w+") as f:
faulthandler.dump_traceback(file=f)
f.seek(0)
tb = f.read()
for p in executor._processes.values():
p.terminate()
        # It should be safe to call executor.shutdown here as all possible
        # deadlocks should have been broken.
executor.shutdown(wait=True)
print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
self.fail(f"Executor deadlock:\n\n{tb}")
def _check_crash(self, error, func, *args, ignore_stderr=False):
# test for deadlock caused by crashes in a pool
self.executor.shutdown(wait=True)
executor = self.executor_type(
max_workers=2, mp_context=get_context(self.ctx))
res = executor.submit(func, *args)
if ignore_stderr:
cm = support.captured_stderr()
else:
cm = contextlib.nullcontext()
try:
with self.assertRaises(error):
with cm:
res.result(timeout=self.TIMEOUT)
except futures.TimeoutError:
# If we did not recover before TIMEOUT seconds,
# consider that the executor is in a deadlock state
self._fail_on_deadlock(executor)
executor.shutdown(wait=True)
def test_error_at_task_pickle(self):
# Check problem occurring while pickling a task in
# the task_handler thread
self._check_crash(PicklingError, id, ErrorAtPickle())
def test_exit_at_task_unpickle(self):
# Check problem occurring while unpickling a task on workers
self._check_crash(BrokenProcessPool, id, ExitAtUnpickle())
def test_error_at_task_unpickle(self):
# Check problem occurring while unpickling a task on workers
self._check_crash(BrokenProcessPool, id, ErrorAtUnpickle())
def test_crash_at_task_unpickle(self):
# Check problem occurring while unpickling a task on workers
self._check_crash(BrokenProcessPool, id, CrashAtUnpickle())
def test_crash_during_func_exec_on_worker(self):
# Check problem occurring during func execution on workers
self._check_crash(BrokenProcessPool, _crash)
def test_exit_during_func_exec_on_worker(self):
# Check problem occurring during func execution on workers
self._check_crash(SystemExit, _exit)
def test_error_during_func_exec_on_worker(self):
# Check problem occurring during func execution on workers
self._check_crash(RuntimeError, _raise_error, RuntimeError)
def test_crash_during_result_pickle_on_worker(self):
# Check problem occurring while pickling a task result
# on workers
self._check_crash(BrokenProcessPool, _return_instance, CrashAtPickle)
def test_exit_during_result_pickle_on_worker(self):
# Check problem occurring while pickling a task result
# on workers
self._check_crash(SystemExit, _return_instance, ExitAtPickle)
def test_error_during_result_pickle_on_worker(self):
# Check problem occurring while pickling a task result
# on workers
self._check_crash(PicklingError, _return_instance, ErrorAtPickle)
def test_error_during_result_unpickle_in_result_handler(self):
# Check problem occurring while unpickling a task in
# the result_handler thread
self._check_crash(BrokenProcessPool,
_return_instance, ErrorAtUnpickle,
ignore_stderr=True)
def test_exit_during_result_unpickle_in_result_handler(self):
# Check problem occurring while unpickling a task in
# the result_handler thread
self._check_crash(BrokenProcessPool, _return_instance, ExitAtUnpickle)
def test_shutdown_deadlock(self):
        # Test that calling shutdown on the pool does not cause a deadlock
        # if a worker fails after the shutdown call.
self.executor.shutdown(wait=True)
with self.executor_type(max_workers=2,
mp_context=get_context(self.ctx)) as executor:
self.executor = executor # Allow clean up in fail_on_deadlock
f = executor.submit(_crash, delay=.1)
executor.shutdown(wait=True)
with self.assertRaises(BrokenProcessPool):
f.result()
def test_shutdown_deadlock_pickle(self):
# Test that the pool calling shutdown with wait=False does not cause
# a deadlock if a task fails at pickle after the shutdown call.
# Reported in bpo-39104.
self.executor.shutdown(wait=True)
with self.executor_type(max_workers=2,
mp_context=get_context(self.ctx)) as executor:
self.executor = executor # Allow clean up in fail_on_deadlock
# Start the executor and get the executor_manager_thread to collect
            # the threads and avoid dangling threads that should be cleaned up
# asynchronously.
executor.submit(id, 42).result()
executor_manager = executor._executor_manager_thread
# Submit a task that fails at pickle and shutdown the executor
# without waiting
f = executor.submit(id, ErrorAtPickle())
executor.shutdown(wait=False)
with self.assertRaises(PicklingError):
f.result()
            # Make sure the executor is eventually shut down and does not
            # leave dangling threads.
executor_manager.join()
create_executor_tests(ExecutorDeadlockTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class FutureTests(BaseTestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_done_callback_raises_already_succeeded(self):
with support.captured_stderr() as stderr:
def raising_fn(callback_future):
raise Exception('doh!')
f = Future()
# Set the result first to simulate a future that runs instantly,
# effectively allowing the callback to be run immediately.
f.set_result(5)
f.add_done_callback(raising_fn)
self.assertIn('exception calling callback for', stderr.getvalue())
self.assertIn('doh!', stderr.getvalue())
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO([email protected]): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
t.join()
def test_result_with_cancel(self):
# TODO([email protected]): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError,
f1.result, timeout=support.SHORT_TIMEOUT)
t.join()
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=support.SHORT_TIMEOUT), OSError))
t.join()
def test_multiple_set_result(self):
f = create_future(state=PENDING)
f.set_result(1)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished returned int>'
):
f.set_result(2)
self.assertTrue(f.done())
self.assertEqual(f.result(), 1)
def test_multiple_set_exception(self):
f = create_future(state=PENDING)
e = ValueError()
f.set_exception(e)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished raised ValueError>'
):
f.set_exception(Exception())
self.assertEqual(f.exception(), e)
def setUpModule():
unittest.addModuleCleanup(multiprocessing.util._cleanup_tests)
thread_info = threading_helper.threading_setup()
unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
| 35.76039 | 95 | 0.618184 |
6452c8a81d8e93d9fd2794db2306cbf9b018a064 | 5,057 | py | Python | setup.py | Naveenaidu/moban | 5a4d41546a99f66b39e7deb3e216c9238fa3b07b | ["MIT"] | null | null | null | setup.py | Naveenaidu/moban | 5a4d41546a99f66b39e7deb3e216c9238fa3b07b | ["MIT"] | null | null | null | setup.py | Naveenaidu/moban | 5a4d41546a99f66b39e7deb3e216c9238fa3b07b | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Template by pypi-mobans
import os
import sys
import codecs
from shutil import rmtree
from setuptools import Command, setup, find_packages
PY2 = sys.version_info[0] == 2
PY26 = PY2 and sys.version_info[1] < 7
NAME = 'moban'
AUTHOR = 'C. W.'
VERSION = '0.3.10'
EMAIL = '[email protected]'
LICENSE = 'MIT'
ENTRY_POINTS = {
'console_scripts': [
'moban = moban.main:main'
],
}
DESCRIPTION = (
'Yet another jinja2 cli command for static text generation'
)
URL = 'https://github.com/moremoban/moban'
DOWNLOAD_URL = '%s/archive/0.3.10.tar.gz' % URL
FILES = ['README.rst', 'CONTRIBUTORS.rst', 'CHANGELOG.rst']
KEYWORDS = [
'python',
'jinja2',
'moban',
]
CLASSIFIERS = [
'Topic :: Software Development :: Libraries',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
INSTALL_REQUIRES = [
'ruamel.yaml==0.15.87',
'jinja2>=2.7.1',
'lml>=0.0.9',
'appdirs==1.4.3',
'crayons',
'GitPython==2.1.11',
'git-url-parse',
]
SETUP_COMMANDS = {}
PACKAGES = find_packages(exclude=['ez_setup', 'examples', 'tests'])
EXTRAS_REQUIRE = {
}
# You do not need to read beyond this line
PUBLISH_COMMAND = '{0} setup.py sdist bdist_wheel upload -r pypi'.format(
sys.executable)
GS_COMMAND = ('gs moban v0.3.10 ' +
"Find 0.3.10 in changelog for more details")
NO_GS_MESSAGE = ('Automatic github release is disabled. ' +
'Please install gease to enable it.')
UPLOAD_FAILED_MSG = (
'Upload failed. please run "%s" yourself.' % PUBLISH_COMMAND)
HERE = os.path.abspath(os.path.dirname(__file__))
class PublishCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package on github and pypi'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds...')
rmtree(os.path.join(HERE, 'dist'))
rmtree(os.path.join(HERE, 'build'))
rmtree(os.path.join(HERE, 'moban.egg-info'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution...')
run_status = True
if has_gease():
run_status = os.system(GS_COMMAND) == 0
else:
self.status(NO_GS_MESSAGE)
if run_status:
if os.system(PUBLISH_COMMAND) != 0:
                self.status(UPLOAD_FAILED_MSG)
sys.exit()
SETUP_COMMANDS.update({
'publish': PublishCommand
})
def has_gease():
"""
test if github release command is installed
visit http://github.com/moremoban/gease for more info
"""
try:
import gease # noqa
return True
except ImportError:
return False
def read_files(*files):
"""Read files into setup"""
text = ""
for single_file in files:
content = read(single_file)
text = text + content + "\n"
return text
def read(afile):
"""Read a file into setup"""
the_relative_file = os.path.join(HERE, afile)
with codecs.open(the_relative_file, 'r', 'utf-8') as opened_file:
content = filter_out_test_code(opened_file)
content = "".join(list(content))
return content
def filter_out_test_code(file_handle):
found_test_code = False
for line in file_handle.readlines():
if line.startswith('.. testcode:'):
found_test_code = True
continue
if found_test_code is True:
if line.startswith(' '):
continue
else:
empty_line = line.strip()
if len(empty_line) == 0:
continue
else:
found_test_code = False
yield line
else:
for keyword in ['|version|', '|today|']:
if keyword in line:
break
else:
yield line
if __name__ == '__main__':
setup(
name=NAME,
author=AUTHOR,
version=VERSION,
author_email=EMAIL,
description=DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
long_description=read_files(*FILES),
license=LICENSE,
keywords=KEYWORDS,
extras_require=EXTRAS_REQUIRE,
tests_require=['nose'],
install_requires=INSTALL_REQUIRES,
packages=PACKAGES,
include_package_data=True,
zip_safe=False,
entry_points=ENTRY_POINTS,
classifiers=CLASSIFIERS,
cmdclass=SETUP_COMMANDS
)
| 25.80102 | 76 | 0.595412 |
7005212e52d344aec7a9101eb4ae457a5712bc37 | 771 | py | Python | Backend/venv/Lib/site-packages/google/cloud/firestore_admin_v1/services/firestore_admin/__init__.py | homer65/fellowcoder | 89433e1d44db64d9aa64a8603a7a38bcd220f035 | ["Apache-2.0"] | 2 | 2021-09-17T10:55:14.000Z | 2021-09-17T10:55:38.000Z | Backend/venv/Lib/site-packages/google/cloud/firestore_admin_v1/services/firestore_admin/__init__.py | homer65/fellowcoder | 89433e1d44db64d9aa64a8603a7a38bcd220f035 | ["Apache-2.0"] | 2 | 2020-04-01T07:31:01.000Z | 2020-11-30T07:03:51.000Z | Backend/venv/Lib/site-packages/google/cloud/firestore_admin_v1/services/firestore_admin/__init__.py | homer65/fellowcoder | 89433e1d44db64d9aa64a8603a7a38bcd220f035 | ["Apache-2.0"] | 1 | 2020-10-04T12:11:36.000Z | 2020-10-04T12:11:36.000Z |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import FirestoreAdminClient
from .async_client import FirestoreAdminAsyncClient
__all__ = (
"FirestoreAdminClient",
"FirestoreAdminAsyncClient",
)
| 30.84 | 74 | 0.756161 |
748d557c125cd1f5269c44ca3909cbb4b4f55b20 | 4,145 | py | Python | bin/attrib_to_categorical.py | arup-group/london-pop-synth | 38e56230d440d49ddb2e2841d46a5cbaab260c35 | ["MIT"] | 1 | 2020-11-25T06:56:43.000Z | 2020-11-25T06:56:43.000Z | bin/attrib_to_categorical.py | arup-group/london-pop-synth | 38e56230d440d49ddb2e2841d46a5cbaab260c35 | ["MIT"] | null | null | null | bin/attrib_to_categorical.py | arup-group/london-pop-synth | 38e56230d440d49ddb2e2841d46a5cbaab260c35 | ["MIT"] | null | null | null |
import xml.etree.ElementTree as ET
import os
import argparse
import numpy as np
import pandas as pd
"""
Helper CLI for converting raw attribute data (one hot encoded) into classes
"""
def get_args():
"""
Gets command line args. Includes a number of defaults for input and output paths.
:return: argparse arguments object
"""
data_path = os.path.join('data', 'plans',
'HHsPerson2016.csv')
# keep = ['recID', 'bname', 'Freq16']
# categories = {'hsize': ['hsize1', 'hsize2', 'hsize3', 'hsize4'],
# 'car': ['car0', 'car1', 'car2'],
# 'inc': ['inc12', 'inc34', 'inc56', 'inc7p'],
# 'hstr': ['hstr1', 'hstr2', 'hstr3'],
# 'gender': ['male', 'female'],
# 'age': ['age5', 'age11', 'age18p', 'age30', 'age65', 'age65p'],
# 'race': ['white', 'mixed', 'india', 'pakbag', 'asian', 'black'],
# 'license': ['pdlcar', 'pdlnone'],
# 'job': ['ft', 'pt', 'edu', 'retired'],
# 'occ': ['occ1', 'occ2', 'occ3', 'occ4', 'occ5', 'occ6', 'occ7', 'occ8']
# }
keep = ['thid', 'tpid', 'hincome', 'age', 'Borough', 'Freq16'] # columns to keep as is
categories = {
'day': ['mon', 'tues', 'wed', 'thur', 'fri', 'sat', 'sun'],
'hsize': ['hsize1', 'hsize2', 'hsize3', 'hsize4', 'hsize5', 'hsize6p'],
'car': ['car0', 'car1', 'car2', 'car2p'],
'hstr': ['hstr1', 'hstr2', 'hstr3', 'hstr4', 'hstr5', 'hstr6'],
'gender': ['male', 'female'],
'age': ['age5', 'age11', 'age16', 'age18', 'age30', 'age65', 'age65p'],
'race': ['white', 'mixed', 'india', 'pakbag', 'asian', 'black'],
'license': ['pdlcar', 'pdlnone'],
'job': ['ft', 'pt', 'student', 'retired'],
'occ': ['occ1', 'occ2', 'occ3', 'occ4', 'occ5', 'occ6', 'occ7', 'occ8'],
}
parser = argparse.ArgumentParser()
parser.add_argument('--att', '-I', default=data_path, type=str,
help="Population attributes path (.csv)")
parser.add_argument('--out', '-O', default=None, type=str,
help="Population attributes path (.csv)")
parser.add_argument('--verbose', '-V', action='store_true')
arguments = parser.parse_args()
arguments.categories = categories
arguments.keep = keep
if not arguments.out:
name = os.path.basename(arguments.att).split('.')[0] + '_cat.csv'
arguments.out = os.path.join('outputs', name)
print("\t> pop attribute inputs from: {}".format(arguments.att))
print("\t> output to: {}".format(arguments.out))
print("\t> conversion using:")
for name, columns in arguments.categories.items():
print("{}: {}".format(name, columns))
return arguments
def get_category(row, columns):
if sum(row) == 0:
return 'unknown'
for column in columns:
if row[column] == 1:
return column
def to_categorical(args):
# convert csv to categorical format
attributes_raw = pd.read_csv(args.att)
attributes_categorical = attributes_raw.loc[:, args.keep]
for category, columns in args.categories.items():
for column in columns:
assert column in attributes_raw.columns, '{} header not found in input data headers'.format(column)
# select input columns for category
cols = attributes_raw[columns]
# extract category from boolean
cat = cols.apply(get_category, args=(columns,), axis=1)
# cat = pd.Series(cols.columns[np.where(cols != 0)[1]]) # this is fast but can't deal with missing 1
# add to new df
attributes_categorical[category] = cat
return attributes_categorical
if __name__ == '__main__':
args = get_args()
print('converting data...')
df = to_categorical(args)
print('saving to disk as {}'.format(args.out))
df.to_csv(args.out, index=False)
print('done')
#
# tree = ET.parse('tfl_pop_transformer/test1.xml')
# root = tree.getroot()
# print(len(root))
# for child in root:
# print(child.attrib['id'])
 | 36.043478 | 111 | 0.558987 |
608c2f9766a40c5d38a4b87b69c1d73a80494e81 | 33,189 | py | Python | SprintMgr/Model.py | esitarski/CrossMgr | de33b5ed662556ec659e6e2910f5fd0f88f25fa0 | ["MIT"] | 25 | 2015-02-26T01:26:10.000Z | 2022-03-25T15:46:55.000Z | SprintMgr/Model.py | esitarski/CrossMgr | de33b5ed662556ec659e6e2910f5fd0f88f25fa0 | ["MIT"] | 76 | 2015-12-09T04:24:30.000Z | 2022-02-18T16:39:28.000Z | SprintMgr/Model.py | esitarski/CrossMgr | de33b5ed662556ec659e6e2910f5fd0f88f25fa0 | ["MIT"] | 17 | 2015-04-23T07:37:13.000Z | 2020-01-22T17:47:16.000Z |
import sys
import copy
import random
import datetime
import traceback
from operator import attrgetter
from collections import defaultdict
import Utils
QualifyingTimeDefault = 99*60*60
Sprint200mQualificationCompetitionTime = 60.0
SprintFinalCompetitionTime = 3*60.0
KeirinCompetitionTime = 5*60.0
class Rider:
status = ''
seeding_rank = 0
uci_points = 0
fields = { 'bib', 'first_name', 'last_name', 'team', 'team_code', 'uci_id', 'qualifying_time', 'uci_points', 'seeding_rank', 'status' }
extended_fields = fields | {'full_name', 'bib_full_name', 'uci_points_text', 'short_name', 'long_name'}
def __init__( self, bib,
first_name = '', last_name = '', team = '', team_code = '', uci_id = '',
qualifying_time = QualifyingTimeDefault,
uci_points = 0,
seeding_rank = 0,
status = ''
):
self.bib = int(bib)
self.first_name = first_name
self.last_name = last_name
self.team = team
self.team_code = team_code
self.uci_id = uci_id
self.qualifying_time = float(qualifying_time)
self.iSeeding = 0 # Actual value in the seeding list.
self.uci_points = float(uci_points or 0)
self.seeding_rank = int(seeding_rank or 0)
self.status = status
named_aliases = (
(('bib#','bib_#','#','num','bibnum','bib_num',), 'bib'),
(('name','rider_name'), 'full_name'),
(('first','fname','firstname','rider_first'), 'first_name'),
(('last','lname','lastname','rider_last',), 'last_name'),
(('uciid','rider_uciid',), 'uci_id'),
(('ucipoints','points','riderucipoints','rider_ucipoints',), 'uci_points'),
(('qualifying','time','rider_time',), 'qualifying_time'),
(('rank', 'seedingrank',), 'seeding_rank'),
(('rider_team',), 'team'),
(('teamcode',), 'team_code'),
(('rider_status',), 'status'),
)
aliases = {}
for names, key in named_aliases:
aliases[key] = key
for n in names:
aliases[n] = key
@staticmethod
def GetHeaderNameMap( headers ):
header_map = {}
for col, h in enumerate(headers):
h = h.strip().replace(' ', '_').lower()
h = Rider.aliases.get( h, h )
if h in Rider.extended_fields:
header_map[h] = col
return header_map
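	# Illustrative note (added for clarity, not in the original source): for a
	# header row such as ['Bib#', 'First Name', 'Last Name', 'UCI ID', 'Time'],
	# GetHeaderNameMap would return
	# {'bib': 0, 'first_name': 1, 'last_name': 2, 'uci_id': 3, 'qualifying_time': 4}.
	# The sample headers are assumptions, not taken from any actual start list.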
def isOpen( self ):
return self.last_name == 'OPEN'
def copyDataFields( self, r ):
if r == self:
return
fields = ('first_name', 'last_name', 'team', 'team_code', 'uci_id', 'uci_points', 'seeding_rank')
for attr in fields:
setattr( self, attr, getattr(r, attr) )
return self
def key( self ):
return tuple( getattr(self, a) for a in ('bib', 'first_name', 'last_name', 'team', 'team_code', 'uci_id', 'qualifying_time', 'uci_points', 'seeding_rank', ) )
@staticmethod
def getKeyQualifying( isKeirin ):
if isKeirin:
return attrgetter('status', 'iSeeding')
else:
return attrgetter('status', 'qualifying_time', 'iSeeding')
@property
def qualifying_time_text( self ):
return Utils.SecondsToStr(self.qualifying_time) if self.qualifying_time < QualifyingTimeDefault else ''
@property
def uci_points_text( self ):
return '{:.2f}'.format(self.uci_points) if self.uci_points else ''
@property
def full_name( self ):
return ', '.join( n for n in (self.last_name.upper(), self.first_name) if n )
@property
def bib_full_name( self ):
return '({}) {}'.format( self.bib, self.full_name ) if self.bib else self.full_name
@property
def short_name( self ):
if self.last_name and self.first_name:
return '{}, {}.'.format(self.last_name.upper(), self.first_name[:1])
return self.last_name.upper() if self.last_name else self.first_name
@property
def bib_short_name( self ):
return '{} {}'.format(self.bib, self.short_name)
@property
def long_name( self ):
n = self.full_name
return '{} ({})'.format(n, self.team) if self.team else n
def __repr__( self ):
return '{}'.format(self.bib)
#------------------------------------------------------------------------------------------------
class State:
def __init__( self ):
self.labels = {} # Riders bound to competition labels.
self.noncontinue = {}
self.OpenRider = Rider( bib=0, last_name='OPEN', qualifying_time=QualifyingTimeDefault + 1.0 )
def setQualifyingInfo( self, riders, competition ):
riders = sorted(
(r for r in riders if r.status != 'DNQ'),
key = Rider.getKeyQualifying(competition.isKeirin),
)[:competition.starters]
self.labels = { 'N{}'.format(i):rider for i, rider in enumerate(riders,1) }
# Initialize extra open spaces to make sure we have enough starters.
self.labels.update( {'N{}'.format(i):self.OpenRider for i in range(len(riders)+1, 128)} )
self.OpenRider.qualifying_time = QualifyingTimeDefault + 1.0
def inContention( self, label ):
return self.labels.get(label, None) != self.OpenRider and label not in self.noncontinue
def canReassignStarters( self ):
		''' Check if no competitions have started and we can reassign starters. '''
return all( label.startswith('N') for label in self.labels.keys() )
def __repr__( self ):
st = [(k,v) for k,v in self.labels.items() if not str(v).startswith('0')]
return ','.join('{}:{}'.format(k,v) for k,v in st) if st else '<<< no state >>>'
#------------------------------------------------------------------------------------------------
class Start:
placesTimestamp = None # Timestamp when places were modified.
finishCode = {
'Inside': 1,
'DNF': 2,
'DNS': 3,
'DQ': 4,
}
warning = set()
def __init__( self, event, lastStart ):
self.event = event
self.lastStart = lastStart
self.startPositions = []
self.finishPositions = [] # id, including finishers, DNF and DNS.
self.continuingPositions = [] # id, including finishers - no DNF and DNS.
self.places = {} # In the format of places[composition] = place, place in 1, 2, 3, 4, etc.
self.times = {} # In the format of times[1] = winner's time, times[2] = runner up's time, etc.
self.relegated = set() # Rider assigned a relegated position in this heat.
self.inside = [] # Rider required to take inside position on next start.
self.noncontinue = {} # In the format of noncontinue[composition] = reason
self.restartRequired = False
self.canDrawLots = False
remainingComposition = self.getRemainingComposition()
if not lastStart:
self.heat = 1
self.firstStartInHeat = True
self.startPositions = [c for c in remainingComposition]
random.shuffle( self.startPositions )
self.canDrawLots = True
else:
if lastStart.restartRequired:
self.firstStartInHeat = False
self.heat = lastStart.heat
self.startPositions = [r for r in lastStart.inside] + \
[c for c in lastStart.startPositions if c not in lastStart.inside]
self.canDrawLots = False
else:
self.heat = lastStart.heat + 1
self.firstStartInHeat = True
if self.heat == 2:
# Find the non-restarted start of the heat.
s = lastStart
while s and not s.firstStartInHeat:
s = s.lastStart
self.startPositions = [r for r in lastStart.inside] + \
[c for c in reversed(s.startPositions) if c not in lastStart.inside]
self.canDrawLots = False
elif self.heat == 3:
if lastStart.inside:
# Don't randomize the start positions again if the last run had a relegation.
self.startPositions = [r for r in lastStart.inside] + \
[c for c in lastStart.startPositions if c not in lastStart.inside]
self.canDrawLots = False
else:
# Randomize the start positions again.
self.startPositions = [c for c in remainingComposition]
random.shuffle( self.startPositions )
self.canDrawLots = True
else:
assert False, 'Cannot have more than 3 heats'
state = event.competition.state
self.startPositions = [c for c in self.startPositions if state.inContention(c)]
if self.event.competition.isMTB:
self.startPositions.sort( key=lambda c: state.labels[c].bib )
def isHanging( self ):
''' Check if there are no results, and this is not a restart. If so, this start was interrupted and needs to be removed. '''
if self.restartRequired:
return False
if self.places:
return False
return True
def setStartPositions( self, startSequence ):
''' startPositions is of the form [(bib, status), (bib, status), ...] '''
state = self.event.competition.state
remainingComposition = self.getRemainingComposition()
bibToId = dict( (state.labels[c].bib, c) for c in remainingComposition )
startIdPosition = { id : i+1000 for i, id in enumerate(self.startPositions) }
for p, (bib, status) in enumerate(startSequence):
id = bibToId[int(bib)]
startIdPosition[id] = p
if status:
self.noncontinue[id] = status
else:
self.noncontinue.pop(id, None)
self.startPositions = [id for p, id in sorted((p, id) for id, p in startIdPosition.items())]
def setPlaces( self, places ):
''' places is of the form [(bib, status, warning, relegation), (bib, status, warning, relegation), ...] '''
state = self.event.competition.state
remainingComposition = self.getRemainingComposition()
bibToId = { state.labels[c].bib: c for c in remainingComposition }
self.noncontinue = {}
self.warning = set()
self.places = {}
self.finishPositions = []
# Correct for status information.
finishCode = self.finishCode
statusPlaceId = []
place = 0
for bib, status, warning, relegation in places:
id = bibToId[int(bib)]
if finishCode.get(status,0) >= 2:
self.noncontinue[id] = status
if status == 'Inside':
self.addInside( id )
if finishCode.get(status,0) <= 3:
place += 1
statusPlaceId.append( (finishCode.get(status,0), place, id) )
if ('{}'.format(warning)[:1] or '0') in '1TtYy':
self.addWarning( id )
if ('{}'.format(relegation)[:1] or '0') in '1TtYy':
self.addRelegation( id )
statusPlaceId.sort()
self.places = { id : i+1 for i, (finishCode, place, id) in enumerate(statusPlaceId) if id not in self.noncontinue }
self.finishPositions = [ id for (finishCode, place, id) in statusPlaceId ]
self.continuingPositions = [ id for (finishCode, place, id) in statusPlaceId if id not in self.noncontinue ]
self.placesTimestamp = datetime.datetime.now()
def resetPlaces( self ):
# Fix up data from previous versions.
if hasattr(self, 'finishPositions'):
return
# Based on the known places and noncontinue status, set the places again so that the
# additional data structures get initialized.
state = self.event.competition.state
OpenRider = state.OpenRider
bibStatus = []
for pos, id in sorted( (pos, id) for pos, id in self.places.items() ):
try:
bibStatus.append( (state.labels[id].bib, '') )
except KeyError:
pass
for id, status in self.noncontinue.items():
bibStatus.append( (state.labels[id].bib, status) )
self.setPlaces( bibStatus )
def setTimes( self, times ):
''' times is of the form [(pos, t), (pos, t), ...] - missing pos have no time '''
self.times = dict( times )
def addRelegation( self, id ):
if isinstance(self.relegated, list):
self.relegated = set( self.relegated )
self.relegated.add( id )
def addInside( self, id ):
self.inside.append( id )
def addWarning( self, id ):
self.warning.add( id )
def getRemainingComposition( self ):
state = self.event.competition.state
return [c for c in self.event.composition if state.inContention(c)]
#------------------------------------------------------------------------------------------------
class Event:
def __init__( self, rule, heatsMax=1 ):
assert '->' in rule, 'Rule must contain ->'
self.rule = rule.upper()
rule = rule.replace( '->', ' -> ').replace('-',' ')
fields = rule.split()
iSep = fields.index( '>' )
# Event transformation.
self.composition = fields[:iSep]
self.winner = fields[iSep+1] # Winner of competition.
self.others = fields[iSep+2:] # Other non-winners.
# If "others" are incomplete, assume classification by TT time.
self.others.extend( ['TT'] * (len(self.composition)-1 - len(self.others)) )
assert len(self.composition) == len(self.others) + 1, 'Rule outputs cannot exceed inputs.'
self.heatsMax = heatsMax # Number of heats required to decide the outcome.
# Convenience fields and are set by the competition.
self.competition = None
self.system = None
# State of the Event.
self.finishRiders, self.finishRiderPlace, self.finishRiderRank = [], {}, {}
self.starts = []
self.compositionRiders = [] # Input riders.
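	# Illustrative note (added for clarity, not in the original source): a rule
	# string such as 'N1 N2 N3 N4 -> 1R 2TT' splits on '->' into the composition
	# ['N1', 'N2', 'N3', 'N4'], the winner label '1R', and the others
	# ['2TT', 'TT', 'TT'] (padded with 'TT' so every non-winner gets an output).
	# The example labels are assumptions patterned on the rules used elsewhere
	# in this module, not taken from a specific competition definition.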
@property
def competitionTime( self ):
if self.competition.isSprint:
if self.competition.isKeirin:
return KeirinCompetitionTime
else:
return (1 if self.heatsMax == 1 else 1.5) * SprintFinalCompetitionTime
return None
@property
def isSemiFinal( self ):
try:
return self.competition.isMTB and self.system == self.competition.systems[-2]
except IndexError:
return False
@property
def isFinal( self ):
try:
return self.competition.isMTB and self.system == self.competition.systems[-1]
except IndexError:
return False
@property
def isSmallFinal( self ):
try:
return self.competition.isMTB and self.system == self.competition.systems[-1] and self == self.system.events[-2]
except IndexError:
return False
@property
def isBigFinal( self ):
try:
return self.competition.isMTB and self.system == self.competition.systems[-1] and self == self.system.events[-1]
except IndexError:
return False
@property
def output( self ):
return [self.winner] + self.others
def getHeat( self ):
heats = sum( 1 for s in self.starts if not s.restartRequired )
return min(heats, self.heatsMax)
def getHeatPlaces( self, heat ):
state = self.competition.state
remainingComposition = [c for c in self.composition if state.inContention(c)]
heatCur = 0
for start in self.starts:
if start.restartRequired:
continue
heatCur += 1
if heatCur != heat:
continue
placeStatus = start.noncontinue.copy()
for c in remainingComposition:
if c not in placeStatus:
placeStatus[c] = str(start.places.get(c, ''))
heatPlaces = [placeStatus.get(c, '') for c in remainingComposition]
heatPlaces = ['Win' if p == '1' else '-' for p in heatPlaces]
return heatPlaces
return [''] * len(remainingComposition)
def __repr__( self ):
state = self.competition.state
remainingComposition = [c for c in self.composition if state.inContention(c)]
remainingOthers = self.others[:len(remainingComposition)-1]
def labName( id ):
return '{}={:-12s}'.format(id, state.labels[id].full_name) if id in state.labels else '{}'.format(id)
s = '{}, Heat {}/{} Start {}: {} => {} {}'.format(
self.system.name,
self.getHeat(), self.heatsMax, len(self.starts),
' '.join(labName(c) for c in remainingComposition),
labName(self.winner),
' '.join(labName(c) for c in remainingOthers) )
return s
@property
def multi_line_name( self ):
return '{}\nHeat {}/{}'.format(self.system.name, self.getHeat(), self.heatsMax)
@property
def multi_line_bibs( self ):
state = self.competition.state
remainingComposition = [c for c in self.composition if state.inContention(c)]
return '\n'.join((str(state.labels[c].bib)) for c in remainingComposition)
@property
def multi_line_rider_names( self ):
state = self.competition.state
remainingComposition = [c for c in self.composition if state.inContention(c)]
return '\n'.join(state.labels[c].full_name for c in remainingComposition)
@property
def multi_line_rider_teams( self ):
state = self.competition.state
remainingComposition = [c for c in self.composition if state.inContention(c)]
return '\n'.join(state.labels[c].team for c in remainingComposition)
@property
def multi_line_inlabels( self ):
state = self.competition.state
remainingComposition = [c for c in self.composition if state.inContention(c)]
return '\n'.join( remainingComposition )
@property
def multi_line_outlabels( self ):
state = self.competition.state
remainingComposition = [c for c in self.composition if state.inContention(c)]
outlabels = [self.winner]
outlabels.extend( self.others[0:len(remainingComposition)-1] )
return '\n'.join( outlabels )
def getRepr( self ):
return self.__repr__()
def getStart( self ):
if not self.canStart():
return None
self.starts.append( Start(self, self.starts[-1] if self.starts else None) )
return self.starts[-1]
def isFinished( self ):
		return self.winner in self.competition.state.labels
def canStart( self ):
state = self.competition.state
return (
all(c in state.labels for c in self.composition) and
any(state.inContention(c) for c in self.composition) and
self.winner not in state.labels
)
def setFinishRiders( self, places ):
finishCode = Start.finishCode
state = self.competition.state
OpenRider = state.OpenRider
noncontinue = state.noncontinue
infoSort = []
for place, id in enumerate(places):
rider = state.labels.get(id, OpenRider)
infoSort.append( (finishCode.get(noncontinue.get(id,''),0), place, rider.qualifying_time, rider, noncontinue.get(id,'')) )
infoSort.sort()
self.finishRiders = [rider for state, place, qualifying_time, rider, nc in infoSort]
self.finishRiderRank = { rider: p+1 for p, (state, place, qualifying_time, rider, nc) in enumerate(infoSort) }
self.finishRiderPlace = { rider: nc if nc else p+1 for p, (state, place, qualifying_time, rider, nc) in enumerate(infoSort) }
def getCompositionRiders( self, places ):
state = self.competition.state
OpenRider = state.OpenRider
return [state.labels.get(p,OpenRider) for p in places]
def propagate( self ):
if not self.canStart():
#print( ', '.join(self.composition), 'Cannot start or already finished - nothing to propagate' )
return False
state = self.competition.state
# Update all non-continuing riders into the competition state.
for s in self.starts:
s.resetPlaces()
state.noncontinue.update( s.noncontinue )
self.finishRiders, self.finishRiderPlace = [], {}
self.compositionRiders = self.getCompositionRiders(self.composition)
# Check for default winner(s).
availableStarters = [c for c in self.composition if c not in state.noncontinue]
# Single sprint case.
if len(availableStarters) == 1:
# Set the default winner.
state.labels[self.winner] = state.labels[availableStarters[0]]
self.setFinishRiders(self.composition)
# Mark the "others" as open riders.
for o in self.others:
state.labels[o] = state.OpenRider
return True
# Check if we have a rider with a majority of wins in the heats.
winCount = defaultdict( int )
for s in self.starts:
if s.restartRequired:
continue
winnerId = s.continuingPositions[0]
winCount[winnerId] += 1
if winCount[winnerId] < self.heatsMax - 1:
continue
# We have a winner of the event. Propagate the results.
state.labels[self.winner] = state.labels[winnerId]
for o, c in zip(self.others, s.continuingPositions[1:]):
state.labels[o] = state.labels[c]
# Set any extra others to "OpenRider".
for o in self.others[len(s.continuingPositions)-1:]:
state.labels[o] = state.OpenRider
# Create the list of finish positions to match the event finish.
self.setFinishRiders( s.finishPositions if self.heatsMax == 1 else s.continuingPositions )
return True
return False
#------------------------------------------------------------------------------------------------
class Competition:
def __init__( self, name, systems ):
self.name = name
self.systems = systems
self.state = State()
# Check that there are no repeated labels in the spec.
inLabels = set()
outLabels = set()
self.starters = 0
starterLabels = set()
self.isMTB = 'MTB' in name
self.isSprint = not self.isMTB
self.isKeirin = self.isSprint and 'Keirin' in name
byeEvents = set()
byeOutcomeMap = {}
ttCount = 0
for iSystem, system in enumerate(self.systems):
rrCount = 0
for e in system.events:
e.competition = self
e.system = system
# Assign unique outcomes for eliminated riders.
if not self.isMTB:
# If Track, non-winners in each round are ranked based on qualifying time only.
for i, other in enumerate(e.others):
if other == 'TT':
ttCount += 1
e.others[i] = '{}TT'.format(ttCount)
else:
# If MTB, the non-winners get credit for the round and finish position.
for i, other in enumerate(e.others):
if other == 'TT':
rrCount += 1
e.others[i] = '{}RR_{}_{}'.format(rrCount, iSystem+1, i+2) # Label is nnRR_ro_fo where nn=unique#, ro=round, fo=finishOrder
print( 'Event:', ' - '.join(e.composition), ' -> ', e.winner, e.others )
for c in e.composition:
assert c not in inLabels, '{}-{} c={}, outLabels={}'.format(e.competition.name, e.system.name, c, ','.join( sorted(outLabels) ))
inLabels.add( c )
if c.startswith('N'):
self.starters += 1
assert c[1:].isdigit(), '{}-{} Non-numeric starter reference "{}"'.format(e.competition.name, e.system.name, c)
starterLabels.add( int(c[1:]) )
else:
assert c in outLabels, '{}-{} Rule uses undefined input label "{}"'.format(e.competition.name, e.system.name, c)
assert e.winner not in outLabels, '{}-{} winner: {}, outLabels={}'.format(
e.competition.name, e.system.name, e.winner, ','.join( sorted(outLabels) ))
outLabels.add( e.winner )
for c in e.others:
assert c not in outLabels, '{}-{} other label: {} is already in outLabels={}'.format(
e.competition.name, e.system.name, c, ','.join( outLabels ))
outLabels.add( c )
assert len(outLabels) <= len(inLabels), '{}-{} len(outLabels)={} exceeds len(inLabels)={}\n {}\n {}'.format(
e.competition.name, e.system.name, len(outLabels), len(inLabels), ','.join(inLabels), ','.join(outLabels) )
# Check if this is a bye Event.
# We handle this by deleting the event and substituting the output value as the input in subsequent events.
if not e.others:
byeEvents.add( e )
byeOutcomeMap[e.winner] = e.composition[0]
assert self.starters != 0, '{}-{} No starters. Check for missing N values'.format(
e.competition.name, e.system.name )
assert self.starters == len(starterLabels), '{}-{} Starters reused in input'.format(
e.competition.name, e.system.name )
assert self.starters == max( s for s in starterLabels), '{}-{} Starter references are not sequential'.format(
e.competition.name, e.system.name )
# Process Bye events (substitute outcome into composition of subsequent events, delete bye event).
# Assign indexes to each component for sorting purposes.
for j, system in enumerate(self.systems):
system.i = j
system.events = [e for e in system.events if e not in byeEvents]
for k, event in enumerate(system.events):
event.i = k
event.composition = [byeOutcomeMap.get(c,c) for c in event.composition]
def getRelegationsWarnings( self, bib, eventCur, before=False ):
relegations = 0
warnings = 0
for system, event in self.allEvents():
if before and event == eventCur:
break
for id in event.composition:
try:
if self.state.labels[id].bib == bib:
for start in event.starts:
if id in start.relegated:
relegations += 1
if id in start.warning:
warnings += 1
except KeyError:
pass
if event == eventCur:
break
return relegations, warnings
def getRelegationsWarningsStr( self, bib, eventCur, before=False ):
relegations, warnings = self.getRelegationsWarnings(bib, eventCur, before)
s = []
if warnings:
s.append( '{} {}'.format(warnings, 'Warn') )
if relegations:
s.append( '{} {}'.format(relegations, 'Rel') )
return ','.join( s )
def canReassignStarters( self ):
return self.state.canReassignStarters()
def allEvents( self ):
for system in self.systems:
for event in system.events:
yield system, event
@property
def heatsMax( self ):
return max( event.heatsMax for system, event in self.allEvents() )
@property
def competitionTime( self ):
return None if not self.isSprint else sum( event.competitionTime for system, event in self.allEvents() )
def reset( self ):
for system, event in self.allEvents():
for start in event.starts:
start.resetPlaces()
def __repr__( self ):
out = ['***** {}'.format(self.name)]
for s, e in self.allEvents():
out.append( ' '.join( [s.name, '[{}]'.format(','.join(e.composition)), ' --> ', '[{}]'.format(','.join(e.output))] ) )
return '\n'.join( out )
def fixHangingStarts( self ):
for s, e in self.allEvents():
while e.starts and e.starts[-1].isHanging():
del e.starts[-1]
def getCanStart( self ):
return [(s, e) for s, e in self.allEvents() if e.canStart()]
def propagate( self ):
while True:
success = False
for s, e in self.allEvents():
success |= e.propagate()
if not success:
break
labels = self.state.labels
return [ labels.get('{}R'.format(r+1), None) for r in range(self.starters) ]
def getRiderStates( self ):
riderState = defaultdict( set )
for id, reason in self.state.noncontinue.items():
riderState[reason].add( self.state.labels[id] )
DQs = riderState['DQ']
DNSs = set( e for e in riderState['DNS'] if e not in DQs )
DNFs = set( e for e in riderState['DNF'] if e not in DNSs and e not in DQs )
return DQs, DNSs, DNFs
def getResults( self ):
DQs, DNSs, DNFs = self.getRiderStates()
semiFinalRound, smallFinalRound, bigFinalRound = 60, 61, 62
riders = { rider for label, rider in self.state.labels.items() if label.startswith('N') }
Finisher, DNF, DNS, DQ = 1, 2, 3, 4
riderStatus = { rider: (DQ if rider in DQs else DNS if rider in DNSs else DNF if rider in DNFs else Finisher) for rider in riders }
statusText = {
Finisher: 'Finisher',
DNF: 'DNF',
DNS: 'DNS',
DQ: 'DQ',
}
if not self.isMTB:
# Rank the rest of the riders based on their results in the competition.
results = [None] * self.starters
for i in range(self.starters):
try:
results[i] = self.state.labels['{}R'.format(i+1)]
except KeyError:
pass
# Rank the remaining riders based on qualifying time (TT).
iTT = self.starters
tts = [rider for label, rider in self.state.labels.items() if label.endswith('TT')]
tts.sort( key = lambda r: r.qualifying_time, reverse = True ) # Sort these in reverse as we assign them in from most to least.
for rider in tts:
iTT -= 1
results[iTT] = rider
results = [('Finisher', r) for r in results if not r or not r.isOpen()]
# Purge unfillable spots from the results.
for r in (DNFs | DNSs | DQs):
try:
results.remove( (statusText[Finisher], None) )
except ValueError:
break
# Add the unclassifiable riders.
for classification, s in (('DNF',DNFs), ('DNS',DNSs), ('DQ', DQs)):
for r in sorted(s, key = lambda r: r.qualifying_time):
results.append( (classification, r) )
# Purge empty results, except at the top.
try:
i = next( j for j, r in enumerate(results) if r[1] ) # Find first non-empty result.
if i != 0:
results[i:] = [r for r in results[i:] if r[1]]
except StopIteration:
pass
# Assign classification for all finishers.
results = [(p+1 if classification == 'Finisher' else classification, rider) for p, (classification, rider) in enumerate(results)]
DNFs = set()
DNSs = set()
else:
# Rank the rest of the riders based on their results also considering the result of their last round.
abnormalFinishers = set()
compResults = []
for system in self.systems:
for event in system.events:
# Get the round of the event.
round = 1
if event.isSemiFinal:
round = semiFinalRound
elif event.isSmallFinal:
round = smallFinalRound
elif event.isBigFinal:
round = bigFinalRound
else:
for id in event.output:
if 'RR' in id:
round = int(id.split('_')[-2])
break
# Rank the finishers.
rank = 0
for i, id in enumerate(event.output):
try:
rider = event.finishRiders[i]
except IndexError:
rider = None
if rider in DQs:
continue
if id.endswith('R'):
rank = int(id[:-1])
isFinish = True
else:
try:
rank = int(id.split('_')[-1])
except ValueError:
rank = i + 1
isFinish = ('RR' in id)
if (isFinish and riderStatus.get(rider,1) == 1) or (round >= 1 and riderStatus.get(rider,1) != 1):
if riderStatus.get(rider,1) != 1:
abnormalFinishers.add( rider )
status = riderStatus.get(rider,1)
statTxt = statusText[Finisher] if status != DQ and round > 1 else statusText[status]
compResults.append( (
-round, status, rank,
rider.qualifying_time if rider else sys.float_info.max,
rider.seeding_rank if rider else 9999999,
rider.bib if rider else 999999,
random.random(), # Some insurance so that the sort does not fail.
statusText[status],
rider
) )
try:
compResults.sort()
except Exception as e:
print( '****', self.name )
raise e
results = [rr[-2:] for rr in compResults]
# Adjust the available finisher positions for the abnormal finishes.
for i in range(len(abnormalFinishers)):
try:
results.remove( (statusText[Finisher], None) )
except ValueError:
break
# Purge empty results, except at the top.
try:
i = next( j for j, r in enumerate(results) if r[1] ) # Find first non-empty result.
if i != 0:
results[i:] = [r for r in results[i:] if r[1]]
except StopIteration:
pass
# Investigate later - should not have to do this!
already_seen = set()
results_non_duplicated = []
for classification, rider in results:
if not rider or rider not in already_seen:
already_seen.add( rider )
results_non_duplicated.append( (classification, rider) )
results = results_non_duplicated
# Assign classification for all finishers.
results = [(p+1 if classification == 'Finisher' or rider in abnormalFinishers else classification, rider) for p, (classification, rider) in enumerate(results)]
DNFs = set()
DNSs = set()
return (
results,
sorted(DNFs, key = lambda r: (r.qualifying_time, -r.uci_points, r.iSeeding)),
sorted(DQs, key = lambda r: (r.qualifying_time, -r.uci_points, r.iSeeding))
)
class System:
def __init__( self, name, events ):
self.name = name
self.events = events
@property
def competitionTime( self ):
try:
return sum( event.competitionTime for event in self.events )
except TypeError:
return None
class Model:
communique_start = 100
modifier = 0
def __init__( self ):
self.competition_name = 'My Competition'
self.date = datetime.date.today()
self.category = 'My Category'
self.track = 'My Track'
self.organizer = 'My Organizer'
self.chief_official = 'My Chief Official'
self.competition = None
self.riders = []
self.changed = False
self.showResults = 0
self.communique_number = {}
@property
def competitionTime( self ):
try:
return self.competition.competitionTime + self.qualifyingCompetitionTime
except TypeError:
return None
@property
def qualifyingCompetitionTime( self ):
return None if self.competition.isMTB else len(self.riders) * Sprint200mQualificationCompetitionTime
@property
def isKeirin( self ):
return self.competition and self.competition.isKeirin
def getProperties( self ):
return { a : getattr(self, a) for a in ('competition_name', 'date', 'category', 'track', 'organizer', 'chief_official') }
def setProperties( self, properties ):
for a, v in properties.items():
setattr(self, a, v)
def updateSeeding( self ):
for iSeeding, rider in enumerate(self.riders, 1):
rider.iSeeding = iSeeding
def getDNQs( self ):
riders = sorted( self.riders, key = lambda r: r.keyQualifying() )
return riders[self.competition.starters:]
def setQualifyingInfo( self ):
self.updateSeeding()
self.competition.state.setQualifyingInfo( self.riders, self.competition )
def canReassignStarters( self ):
return self.competition.state.canReassignStarters()
def setChanged( self, changed=True ):
self.changed = changed
def setCompetition( self, competitionNew, modifier=0 ):
if self.competition.name == competitionNew.name and self.modifier == modifier:
return
stateSave = self.competition.state
self.competition = copy.deepcopy( competitionNew )
self.competition.state = stateSave
self.modifier = modifier
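  # The modifier trims multi-heat rounds down to a single heat:
  # 1 = quarter-finals only, 2 = quarter- and semi-finals, 3 = every round.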
if modifier:
for system, event in self.competition.allEvents():
if modifier == 3:
event.heatsMax = 1
elif modifier == 2:
if '1/4' in system.name or '1/2' in system.name:
event.heatsMax = 1
elif modifier == 1:
if '1/4' in system.name:
event.heatsMax = 1
self.setChanged( True )
model = Model()
| 32.958292 | 162 | 0.659526 |
215b72e4debb86de578af98cf8b75ba4b2979e70 | 10,532 | py | Python | flexmeasures/data/services/forecasting.py | FlexMeasures/flexmeasures | a4367976d37ac5721b8eb3ce8a2414595e52c678 | [
"Apache-2.0"
] | 12 | 2021-12-18T10:41:10.000Z | 2022-03-29T23:00:29.000Z | flexmeasures/data/services/forecasting.py | FlexMeasures/flexmeasures | a4367976d37ac5721b8eb3ce8a2414595e52c678 | [
"Apache-2.0"
] | 103 | 2021-12-07T08:51:15.000Z | 2022-03-31T13:28:48.000Z | flexmeasures/data/services/forecasting.py | FlexMeasures/flexmeasures | a4367976d37ac5721b8eb3ce8a2414595e52c678 | [
"Apache-2.0"
] | 3 | 2022-01-18T04:45:48.000Z | 2022-03-14T09:48:22.000Z | from datetime import datetime, timedelta
from typing import List
from flask import current_app
import click
from rq import get_current_job
from rq.job import Job
from timetomodel.forecasting import make_rolling_forecasts
import timely_beliefs as tb
from flexmeasures.data import db
from flexmeasures.data.models.forecasting import lookup_model_specs_configurator
from flexmeasures.data.models.forecasting.exceptions import InvalidHorizonException
from flexmeasures.data.models.time_series import Sensor, TimedBelief
from flexmeasures.data.models.forecasting.utils import (
get_query_window,
check_data_availability,
)
from flexmeasures.data.utils import get_data_source, save_to_db
from flexmeasures.utils.time_utils import (
as_server_time,
server_now,
forecast_horizons_for,
supported_horizons,
)
"""
The life cycle of a forecasting job:
1. A forecasting job is born in create_forecasting_jobs.
2. It is run in make_rolling_viewpoint_forecasts or make_fixed_viewpoint_forecasts, which write results to the db.
This is also where model specs are configured and a possible fallback model is stored for step 3.
3. If an error occurs (and the worker is configured accordingly), handle_forecasting_exception comes in.
This might re-enqueue the job or try a different model (which creates a new job).
"""
# TODO: we could also monitor the failed queue and re-enqueue jobs who had missing data
# (and maybe failed less than three times so far)
class MisconfiguredForecastingJobException(Exception):
pass
def create_forecasting_jobs(
sensor_id: int,
start_of_roll: datetime,
end_of_roll: datetime,
resolution: timedelta = None,
horizons: List[timedelta] = None,
model_search_term="linear-OLS",
custom_model_params: dict = None,
enqueue: bool = True,
) -> List[Job]:
"""Create forecasting jobs by rolling through a time window, for a number of given forecast horizons.
Start and end of the forecasting jobs are equal to the time window (start_of_roll, end_of_roll) plus the horizon.
For example (with shorthand notation):
start_of_roll = 3pm
end_of_roll = 5pm
resolution = 15min
horizons = [1h, 6h, 1d]
This creates the following 3 jobs:
1) forecast each quarter-hour from 4pm to 6pm, i.e. the 1h forecast
2) forecast each quarter-hour from 9pm to 11pm, i.e. the 6h forecast
3) forecast each quarter-hour from 3pm to 5pm the next day, i.e. the 1d forecast
If not given, relevant horizons are derived from the resolution of the posted data.
The job needs a model configurator, for which you can supply a model search term. If omitted, the
current default model configuration will be used.
It's possible to customize model parameters, but this feature is (currently) meant to only
be used by tests, so that model behaviour can be adapted to test conditions. If used outside
of testing, an exception is raised.
    If enqueue is True (default), the jobs are put on the redis queue.
Returns the redis-queue forecasting jobs which were created.
"""
if not current_app.testing and custom_model_params is not None:
raise MisconfiguredForecastingJobException(
"Model parameters can only be customized during testing."
)
if horizons is None:
if resolution is None:
raise MisconfiguredForecastingJobException(
"Cannot create forecasting jobs - set either horizons or resolution."
)
horizons = forecast_horizons_for(resolution)
jobs: List[Job] = []
for horizon in horizons:
job = Job.create(
make_rolling_viewpoint_forecasts,
kwargs=dict(
sensor_id=sensor_id,
horizon=horizon,
start=start_of_roll + horizon,
end=end_of_roll + horizon,
custom_model_params=custom_model_params,
),
connection=current_app.queues["forecasting"].connection,
ttl=int(
current_app.config.get(
"FLEXMEASURES_JOB_TTL", timedelta(-1)
).total_seconds()
),
)
job.meta["model_search_term"] = model_search_term
job.save_meta()
jobs.append(job)
if enqueue:
current_app.queues["forecasting"].enqueue_job(job)
return jobs
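# Example (illustrative sketch, not part of the original module): reproducing the
# 3pm-5pm / 15-minute scenario from the docstring above. The sensor id is a
# placeholder, and the call must run inside a Flask app context, since jobs are
# created against current_app's "forecasting" queue.
#
#     from datetime import datetime, timedelta
#     jobs = create_forecasting_jobs(
#         sensor_id=1,                                  # hypothetical sensor
#         start_of_roll=datetime(2021, 1, 1, 15),       # 3pm
#         end_of_roll=datetime(2021, 1, 1, 17),         # 5pm
#         resolution=timedelta(minutes=15),
#         horizons=[timedelta(hours=1), timedelta(hours=6), timedelta(days=1)],
#         enqueue=False,                                # create only, queue later
#     )
#     # -> 3 jobs, each covering (start_of_roll + horizon, end_of_roll + horizon)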
def make_fixed_viewpoint_forecasts(
sensor_id: int,
horizon: timedelta,
start: datetime,
end: datetime,
custom_model_params: dict = None,
) -> int:
"""Build forecasting model specs, make fixed-viewpoint forecasts, and save the forecasts made.
Each individual forecast is a belief about a time interval.
Fixed-viewpoint forecasts share the same belief time.
See the timely-beliefs lib for relevant terminology.
"""
# todo: implement fixed-viewpoint forecasts
raise NotImplementedError
def make_rolling_viewpoint_forecasts(
sensor_id: int,
horizon: timedelta,
start: datetime,
end: datetime,
custom_model_params: dict = None,
) -> int:
"""Build forecasting model specs, make rolling-viewpoint forecasts, and save the forecasts made.
Each individual forecast is a belief about a time interval.
Rolling-viewpoint forecasts share the same belief horizon (the duration between belief time and knowledge time).
Model specs are also retrained in a rolling fashion, but with its own frequency set in custom_model_params.
See the timely-beliefs lib for relevant terminology.
Parameters
----------
:param sensor_id: int
To identify which sensor to forecast
:param horizon: timedelta
duration between the end of each interval and the time at which the belief about that interval is formed
:param start: datetime
start of forecast period, i.e. start time of the first interval to be forecast
:param end: datetime
        end of forecast period, i.e. end time of the last interval to be forecast
:param custom_model_params: dict
pass in params which will be passed to the model specs configurator,
e.g. outcome_var_transformation, only advisable to be used for testing.
:returns: int
the number of forecasts made
"""
# https://docs.sqlalchemy.org/en/13/faq/connections.html#how-do-i-use-engines-connections-sessions-with-python-multiprocessing-or-os-fork
db.engine.dispose()
rq_job = get_current_job()
# find out which model to run, fall back to latest recommended
model_search_term = rq_job.meta.get("model_search_term", "linear-OLS")
# find sensor
sensor = Sensor.query.filter_by(id=sensor_id).one_or_none()
click.echo(
"Running Forecasting Job %s: %s for %s on model '%s', from %s to %s"
% (rq_job.id, sensor, horizon, model_search_term, start, end)
)
if hasattr(sensor, "market_type"):
        ex_post_horizon = None  # Todo: until we have sorted out the ex_post_horizon, use all available price data
else:
ex_post_horizon = timedelta(hours=0)
# Make model specs
model_configurator = lookup_model_specs_configurator(model_search_term)
model_specs, model_identifier, fallback_model_search_term = model_configurator(
sensor=sensor,
forecast_start=as_server_time(start),
forecast_end=as_server_time(end),
forecast_horizon=horizon,
ex_post_horizon=ex_post_horizon,
custom_model_params=custom_model_params,
)
model_specs.creation_time = server_now()
rq_job.meta["model_identifier"] = model_identifier
rq_job.meta["fallback_model_search_term"] = fallback_model_search_term
rq_job.save()
# before we run the model, check if horizon is okay and enough data is available
if horizon not in supported_horizons():
raise InvalidHorizonException(
"Invalid horizon on job %s: %s" % (rq_job.id, horizon)
)
query_window = get_query_window(
model_specs.start_of_training,
end,
[lag * model_specs.frequency for lag in model_specs.lags],
)
check_data_availability(
sensor,
TimedBelief,
start,
end,
query_window,
horizon,
)
data_source = get_data_source(
data_source_name="Seita (%s)"
% rq_job.meta.get("model_identifier", "unknown model"),
data_source_type="forecasting script",
)
forecasts, model_state = make_rolling_forecasts(
start=as_server_time(start),
end=as_server_time(end),
model_specs=model_specs,
)
click.echo("Job %s made %d forecasts." % (rq_job.id, len(forecasts)))
ts_value_forecasts = [
TimedBelief(
event_start=dt,
belief_horizon=horizon,
event_value=value,
sensor=sensor,
source=data_source,
)
for dt, value in forecasts.items()
]
bdf = tb.BeliefsDataFrame(ts_value_forecasts)
save_to_db(bdf)
db.session.commit()
return len(forecasts)
def handle_forecasting_exception(job, exc_type, exc_value, traceback):
"""
Decide if we can do something about this failure:
* Try a different model
* Re-queue at a later time (using rq_scheduler)
"""
click.echo("HANDLING RQ WORKER EXCEPTION: %s:%s\n" % (exc_type, exc_value))
if "failures" not in job.meta:
job.meta["failures"] = 1
else:
job.meta["failures"] = job.meta["failures"] + 1
job.save_meta()
# We might use this to decide if we want to re-queue a failed job
# if job.meta['failures'] < 3:
# job.queue.failures.requeue(job)
# TODO: use this to add more meta information?
# if exc_type == NotEnoughDataException:
if "fallback_model_search_term" in job.meta:
if job.meta["fallback_model_search_term"] is not None:
new_job = Job.create(
make_rolling_viewpoint_forecasts,
args=job.args,
kwargs=job.kwargs,
connection=current_app.queues["forecasting"].connection,
)
new_job.meta["model_search_term"] = job.meta["fallback_model_search_term"]
new_job.save_meta()
current_app.queues["forecasting"].enqueue_job(new_job)
def num_forecasts(start: datetime, end: datetime, resolution: timedelta) -> int:
"""Compute how many forecasts a job needs to make, given a resolution"""
return (end - start) // resolution
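# Example (illustrative): a 2-hour window at 15-minute resolution yields 8 forecasts,
# e.g. num_forecasts(datetime(2021, 1, 1, 15), datetime(2021, 1, 1, 17),
#                    timedelta(minutes=15)) == 8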
| 36.19244 | 141 | 0.688283 |
488e77b23ededf6682d54cf9c6cd3333931c6978 | 1,039 | py | Python | solutions/Course Schedule II/solution.py | nilax97/leetcode-solutions | d3c12f2b289662d199510e0431e177bbf3cda121 | [
"MIT"
] | 3 | 2021-06-06T22:03:15.000Z | 2021-06-08T08:49:04.000Z | solutions/Course Schedule II/solution.py | nilax97/leetcode-solutions | d3c12f2b289662d199510e0431e177bbf3cda121 | [
"MIT"
] | null | null | null | solutions/Course Schedule II/solution.py | nilax97/leetcode-solutions | d3c12f2b289662d199510e0431e177bbf3cda121 | [
"MIT"
] | null | null | null | class Solution:
def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
order = []
valid = [1] * numCourses
val = dict()
for x in prerequisites:
if x[0] in val:
val[x[0]].append(x[1])
else:
val[x[0]] = [x[1]]
valid[x[0]] = 0
for i in range(numCourses):
if valid[i] == 1:
order.append(i)
        while True:
            change = 0
            for i in range(numCourses):
                # Skip courses already placed in the ordering.
                if valid[i] == 1:
                    continue
                # Check whether any prerequisite of course i is still unplaced.
                blocked = 0
                for x in val[i]:
                    if valid[x] == 0:
                        blocked = 1
                        break
                if blocked == 0:
                    change += 1
                    valid[i] = 1
                    order.append(i)
            # No course could be added in a full sweep: either done or stuck in a cycle.
            if change == 0:
                break
        # If any course remains unplaced, the prerequisites contain a cycle.
        for x in valid:
            if x == 0:
                return []
        return order
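# Example usage (illustrative, not part of the original solution):
#   Solution().findOrder(2, [[1, 0]])          # -> [0, 1]
#   Solution().findOrder(2, [[1, 0], [0, 1]])  # -> [] (cyclic prerequisites)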
| 29.685714 | 86 | 0.361886 |
e4a6ec94830031686a58be56fd37245202151992 | 2,209 | py | Python | devilry/devilry_compressionutil/backend_registry.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T22:56:23.000Z | 2020-11-10T21:28:27.000Z | devilry/devilry_compressionutil/backend_registry.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | [
"BSD-3-Clause"
] | 786 | 2015-01-06T16:10:18.000Z | 2022-03-16T11:10:50.000Z | devilry/devilry_compressionutil/backend_registry.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | [
"BSD-3-Clause"
] | 15 | 2015-04-06T06:18:43.000Z | 2021-02-24T12:28:30.000Z | from ievv_opensource.utils.singleton import Singleton
class DuplicateBackendTypeError(Exception):
"""
Exception raised when trying to add multiple :class:`.~devilry.devilry_ziputil.backends.PythonZipFileBackend`
with same ID.
"""
class Registry(Singleton):
"""
Registry for subclasses of
:class:`~devilry.devilry_ziputil.backends.backends_base.PythonZipFileBackend`.
"""
def __init__(self):
super(Registry, self).__init__()
self._backendclasses = {}
def __get_class_path(self):
"""
Get class path.
Returns:
Classpath.
"""
return '{}.{}'.format(self.__module__, self.__class__.__name__)
def add(self, backend):
"""
Add a backend class.
Args:
backend: backend class.
"""
if backend.backend_id in self._backendclasses:
raise DuplicateBackendTypeError('Duplicate backend id in {}: {}'.format(
self.__get_class_path(), backend.backend_id
))
self._backendclasses[backend.backend_id] = backend
def get(self, backend_id):
"""
Get backend class.
Args:
backend_id: ID of backend class.
Returns:
:class:`~devilry.devilry_ziputil.backends.backends_base.PythonZipFileBackend` subclass or ``None``.
"""
try:
backend_class = self._backendclasses[backend_id]
except KeyError:
return None
return backend_class
class MockableRegistry(Registry):
"""
A non-singleton version of :class:`.Registry` for tests.
"""
def __init__(self):
self._instance = None
super(MockableRegistry, self).__init__()
@classmethod
def make_mockregistry(cls, *backend_classes):
"""
Create a mocked instance of Registry.
Args:
*backend_classes: Backends to add.
Returns:
MockableRegistry: An object of this class with the ``backend_classes`` registered.
"""
mockregistry = cls()
for backend_class in backend_classes:
mockregistry.add(backend_class)
return mockregistry
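# Example usage sketch (illustrative; ``FakeZipBackend`` is a stand-in for any
# class exposing a ``backend_id`` attribute, such as the PythonZipFileBackend
# subclasses referenced above):
#
#   class FakeZipBackend:
#       backend_id = 'zip'
#
#   mockregistry = MockableRegistry.make_mockregistry(FakeZipBackend)
#   assert mockregistry.get('zip') is FakeZipBackend
#   assert mockregistry.get('unknown') is None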
| 26.614458 | 113 | 0.611589 |
05a7a92177c638b16e713c15b994403bf349ee36 | 26,829 | py | Python | platform/gsutil/gslib/tests/test_ls.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | platform/gsutil/gslib/tests/test_ls.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | platform/gsutil/gslib/tests/test_ls.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | 2 | 2020-11-04T03:08:21.000Z | 2020-11-05T08:14:41.000Z | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ls command."""
from __future__ import absolute_import
import posixpath
import re
import subprocess
import sys
import gslib
from gslib.cs_api_map import ApiSelector
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_MD5
from gslib.tests.util import TEST_ENCRYPTION_KEY1
from gslib.tests.util import TEST_ENCRYPTION_KEY1_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY2
from gslib.tests.util import TEST_ENCRYPTION_KEY2_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY3
from gslib.tests.util import TEST_ENCRYPTION_KEY3_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY4
from gslib.tests.util import TEST_ENCRYPTION_KEY4_SHA256_B64
from gslib.tests.util import unittest
from gslib.util import IS_WINDOWS
from gslib.util import Retry
from gslib.util import UTF8
class TestLs(testcase.GsUtilIntegrationTestCase):
"""Integration tests for ls command."""
def test_blank_ls(self):
self.RunGsUtil(['ls'])
def test_empty_bucket(self):
bucket_uri = self.CreateBucket()
self.AssertNObjectsInBucket(bucket_uri, 0)
def test_empty_bucket_with_b(self):
bucket_uri = self.CreateBucket()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-b', suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s/\n' % suri(bucket_uri), stdout)
_Check1()
def test_bucket_with_Lb(self):
"""Tests ls -Lb."""
bucket_uri = self.CreateBucket()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)],
return_stdout=True)
self.assertIn(suri(bucket_uri), stdout)
self.assertNotIn('TOTAL:', stdout)
_Check1()
def test_bucket_with_lb(self):
"""Tests ls -lb."""
bucket_uri = self.CreateBucket()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-lb', suri(bucket_uri)],
return_stdout=True)
self.assertIn(suri(bucket_uri), stdout)
self.assertNotIn('TOTAL:', stdout)
_Check1()
def test_bucket_list_wildcard(self):
"""Tests listing multiple buckets with a wildcard."""
random_prefix = self.MakeRandomTestString()
bucket1_name = self.MakeTempName('bucket', prefix=random_prefix)
bucket2_name = self.MakeTempName('bucket', prefix=random_prefix)
bucket1_uri = self.CreateBucket(bucket_name=bucket1_name)
bucket2_uri = self.CreateBucket(bucket_name=bucket2_name)
# This just double checks that the common prefix of the two buckets is what
# we think it should be (based on implementation detail of CreateBucket).
# We want to be careful when setting a wildcard on buckets to make sure we
# don't step outside the test buckets to affect other buckets.
common_prefix = posixpath.commonprefix([suri(bucket1_uri),
suri(bucket2_uri)])
self.assertTrue(common_prefix.startswith(
'%s://%sgsutil-test-test_bucket_list_wildcard-bucket-' %
(self.default_provider, random_prefix)))
wildcard = '%s*' % common_prefix
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-b', wildcard], return_stdout=True)
expected = set([suri(bucket1_uri) + '/', suri(bucket2_uri) + '/'])
actual = set(stdout.split())
self.assertEqual(expected, actual)
_Check1()
def test_nonexistent_bucket_with_ls(self):
"""Tests a bucket that is known not to exist."""
stderr = self.RunGsUtil(
['ls', '-lb', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True, expected_status=1)
self.assertIn('404', stderr)
stderr = self.RunGsUtil(
['ls', '-Lb', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True, expected_status=1)
self.assertIn('404', stderr)
stderr = self.RunGsUtil(
['ls', '-b', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True, expected_status=1)
self.assertIn('404', stderr)
def test_list_missing_object(self):
"""Tests listing a non-existent object."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'missing')],
return_stderr=True, expected_status=1)
self.assertIn('matched no objects', stderr)
def test_with_one_object(self):
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
_Check1()
def test_subdir(self):
"""Tests listing a bucket subdirectory."""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '%s/dir' % suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s\n' % suri(k2_uri), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_subdir_nocontents(self):
"""Tests listing a bucket subdirectory using -d.
Result will display subdirectory names instead of contents. Uses a wildcard
to show multiple matching subdirectories.
"""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
k3_uri = bucket_uri.clone_replace_name('dir/foo2')
k3_uri.set_contents_from_string('foo')
k4_uri = bucket_uri.clone_replace_name('dir2/foo3')
k4_uri.set_contents_from_string('foo2')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-d', '%s/dir*' % suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s/dir/\n%s/dir2/\n' %
(suri(bucket_uri), suri(bucket_uri)), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_versioning(self):
"""Tests listing a versioned bucket."""
bucket1_uri = self.CreateBucket(test_objects=1)
bucket2_uri = self.CreateVersionedBucket(test_objects=1)
self.AssertNObjectsInBucket(bucket1_uri, 1, versioned=True)
bucket_list = list(bucket1_uri.list_bucket())
objuri = [bucket1_uri.clone_replace_key(key).versionless_uri
for key in bucket_list][0]
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-a', suri(bucket2_uri)],
return_stdout=True)
self.assertNumLines(stdout, 3)
stdout = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
return_stdout=True)
self.assertIn('%s#' % bucket2_uri.clone_replace_name(bucket_list[0].name),
stdout)
self.assertIn('metageneration=', stdout)
_Check2()
def test_etag(self):
"""Tests that listing an object with an etag."""
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo')
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
etag = obj_uri.get_key().etag.strip('"\'')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertNotIn(etag, stdout)
else:
self.assertNotIn('etag=', stdout)
_Check1()
def _Check2():
stdout = self.RunGsUtil(['ls', '-le', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check2()
def _Check3():
stdout = self.RunGsUtil(['ls', '-ale', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check3()
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_location(self):
"""Tests listing a bucket with location constraint."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No location info
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri],
return_stdout=True)
self.assertNotIn('Location constraint', stdout)
# Default location constraint is US
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri],
return_stdout=True)
self.assertIn('Location constraint:\t\tUS', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_logging(self):
"""Tests listing a bucket with logging config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No logging info
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri],
return_stdout=True)
self.assertNotIn('Logging configuration', stdout)
# Logging configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri],
return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
# Enable and check
self.RunGsUtil(['logging', 'set', 'on', '-b', bucket_suri,
bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri],
return_stdout=True)
self.assertIn('Logging configuration:\t\tPresent', stdout)
# Disable and check
self.RunGsUtil(['logging', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri],
return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_web(self):
"""Tests listing a bucket with website config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No website configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri],
return_stdout=True)
self.assertNotIn('Website configuration', stdout)
# Website configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri],
return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['web', 'set', '-m', 'google.com', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri],
return_stdout=True)
self.assertIn('Website configuration:\t\tPresent', stdout)
# Clear and check
self.RunGsUtil(['web', 'set', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri],
return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
def test_list_sizes(self):
"""Tests various size listing options."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, contents='x' * 2048)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check2()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-al', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check4():
stdout = self.RunGsUtil(['ls', '-lh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check4()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check5():
stdout = self.RunGsUtil(['ls', '-alh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check5()
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
def test_list_unicode_filename(self):
"""Tests listing an object with a unicode filename."""
# Note: This test fails on Windows (command.exe). I was able to get ls to
# output Unicode filenames correctly by hacking the UniStream class code
# shown at
# http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/3259271
# into the start of gslib/commands/ls.py, along with no-op flush and
# isastream functions (as an experiment). However, even with that change,
# the current test still fails, since it also needs to run that
# stdout/stderr-replacement code. That UniStream class replacement really
# needs to be added to the site-packages on Windows python.
object_name = u'Аудиоархив'
object_name_bytes = object_name.encode(UTF8)
bucket_uri = self.CreateVersionedBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo',
object_name=object_name)
self.AssertNObjectsInBucket(bucket_uri, 1, versioned=True)
stdout = self.RunGsUtil(['ls', '-ael', suri(key_uri)],
return_stdout=True)
self.assertIn(object_name_bytes, stdout)
if self.default_provider == 'gs':
self.assertIn(str(key_uri.generation), stdout)
self.assertIn(
'metageneration=%s' % key_uri.get_key().metageneration, stdout)
if self.test_api == ApiSelector.XML:
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
else:
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
self.assertIn('etag=', stdout)
elif self.default_provider == 's3':
self.assertIn(key_uri.version_id, stdout)
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
def test_list_acl(self):
"""Tests that long listing includes an ACL."""
key_uri = self.CreateObject(contents='foo')
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertIn('ACL:', stdout)
self.assertNotIn('ACCESS DENIED', stdout)
def test_list_gzip_content_length(self):
"""Tests listing a gzipped object."""
file_size = 10000
file_contents = 'x' * file_size
fpath = self.CreateTempFile(contents=file_contents, file_name='foo.txt')
key_uri = self.CreateObject()
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath), suri(key_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Encoding:\s+gzip')
find_content_length_re = r'Content-Length:\s+(?P<num>\d)'
self.assertRegexpMatches(stdout, find_content_length_re)
m = re.search(find_content_length_re, stdout)
content_length = int(m.group('num'))
self.assertGreater(content_length, 0)
self.assertLess(content_length, file_size)
_Check1()
def test_output_chopped(self):
"""Tests that gsutil still succeeds with a truncated stdout."""
bucket_uri = self.CreateBucket(test_objects=2)
# Run Python with the -u flag so output is not buffered.
gsutil_cmd = [
sys.executable, '-u', gslib.GSUTIL_PATH, 'ls', suri(bucket_uri)]
# Set bufsize to 0 to make sure output is not buffered.
p = subprocess.Popen(gsutil_cmd, stdout=subprocess.PIPE, bufsize=0)
# Immediately close the stdout pipe so that gsutil gets a broken pipe error.
p.stdout.close()
p.wait()
# Make sure it still exited cleanly.
self.assertEqual(p.returncode, 0)
def test_recursive_list_trailing_slash(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='/', contents='foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/', stdout)
def test_recursive_list_trailing_two_slash(self):
"""Tests listing an object with two trailing slashes."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='//', contents='foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '//', stdout)
def test_wildcard_prefix(self):
"""Tests that an object name with a wildcard does not infinite loop."""
bucket_uri = self.CreateBucket()
wildcard_folder_object = 'wildcard*/'
object_matching_folder = 'wildcard10/foo'
self.CreateObject(bucket_uri=bucket_uri, object_name=wildcard_folder_object,
contents='foo')
self.CreateObject(bucket_uri=bucket_uri, object_name=object_matching_folder,
contents='foo')
self.AssertNObjectsInBucket(bucket_uri, 2)
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'wildcard*')],
return_stderr=True, expected_status=1)
self.assertIn('Cloud folder %s%s contains a wildcard' %
(suri(bucket_uri), '/wildcard*/'), stderr)
# Listing with a flat wildcard should still succeed.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri, '**')],
return_stdout=True)
self.assertNumLines(stdout, 3) # 2 object lines, one summary line.
_Check()
@SkipForS3('S3 anonymous access is not supported.')
def test_get_object_without_list_bucket_permission(self):
# Bucket is not publicly readable by default.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='permitted', contents='foo')
# Set this object to be publicly readable.
self.RunGsUtil(['acl', 'set', 'public-read', suri(object_uri)])
# Drop credentials.
with self.SetAnonymousBotoCreds():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(suri(object_uri), stdout)
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_encrypted_object(self):
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_uri = self.CreateObject(object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1,
encryption_key=TEST_ENCRYPTION_KEY1)
# Listing object with key should return unencrypted hashes.
with SetBotoConfigForTest([
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectDecrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64, stdout)
_ListExpectDecrypted()
# Listing object without a key should return encrypted hashes.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectEncrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64, stdout)
_ListExpectEncrypted()
# Listing object with a non-matching key should return encrypted hashes.
with SetBotoConfigForTest([
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]):
_ListExpectEncrypted()
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_mixed_encryption(self):
"""Tests listing objects with various encryption interactions."""
bucket_uri = self.CreateBucket()
self.CreateObject(
bucket_uri=bucket_uri, object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1, encryption_key=TEST_ENCRYPTION_KEY1)
self.CreateObject(
bucket_uri=bucket_uri, object_name='foo2',
contents=TEST_ENCRYPTION_CONTENT2, encryption_key=TEST_ENCRYPTION_KEY2)
self.CreateObject(
bucket_uri=bucket_uri, object_name='foo3',
contents=TEST_ENCRYPTION_CONTENT3, encryption_key=TEST_ENCRYPTION_KEY3)
self.CreateObject(
bucket_uri=bucket_uri, object_name='foo4',
contents=TEST_ENCRYPTION_CONTENT4, encryption_key=TEST_ENCRYPTION_KEY4)
self.CreateObject(
bucket_uri=bucket_uri, object_name='foo5',
contents=TEST_ENCRYPTION_CONTENT5)
# List 5 objects, one encrypted with each of four keys, and one
# unencrypted. Supplying keys [1,3,4] should result in four unencrypted
# listings and one encrypted listing (for key 2).
with SetBotoConfigForTest([
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY3),
('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY4)
]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectMixed():
"""Validates object listing."""
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY2_SHA256_B64, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY3_SHA256_B64, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY4_SHA256_B64, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_CRC32C, stdout)
_ListExpectMixed()
| 43.766721 | 103 | 0.681017 |
2b94621993cb2121bde7c9ee5c6588cca9078f30 | 4,598 | py | Python | src/tarski/fstrips/visitors.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 29 | 2018-11-26T20:31:04.000Z | 2021-12-29T11:08:40.000Z | src/tarski/fstrips/visitors.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 101 | 2018-06-07T13:10:01.000Z | 2022-03-11T11:54:00.000Z | src/tarski/fstrips/visitors.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 18 | 2018-11-01T22:44:39.000Z | 2022-02-28T04:57:15.000Z | """
Visitors implementing diverse aspects of FSTRIPS problems translation,
analysis and compilation.
"""
from typing import Set
from ..syntax.symrefs import TermReference
from ..syntax.temporal import ltl
from ..syntax.formulas import CompoundFormula, Atom, QuantifiedFormula
from ..syntax.terms import CompoundTerm
from ..syntax import symref
class FluentHeuristic:
action_effects = 1
precondition = 2
constraint = 3
class FluentSymbolCollector:
"""
This visitor collects CompoundTerms which are candidates to become
state variables.
"""
def __init__(self, lang, fluents, statics, mode: FluentHeuristic):
self.mode = mode
self.lang = lang
self.fluents = fluents
self.statics = statics
self.under_next = False
self.visited = set() # type: Set[TermReference]
def reset(self):
self.visited = set()
def _visit_action_effect_formula(self, phi):
if isinstance(phi, CompoundFormula):
_ = [self.visit(f) for f in phi.subformulas]
elif isinstance(phi, QuantifiedFormula):
self.visit(phi.formula)
elif isinstance(phi, Atom):
if not phi.predicate.builtin:
self.fluents.add(symref(phi))
else:
_ = [self.visit(f) for f in phi.subterms]
elif isinstance(phi, CompoundTerm):
# print("Compound Term: {}, {}, {}".format(str(phi), phi.symbol, phi.symbol.builtin))
if not phi.symbol.builtin:
self.fluents.add(symref(phi))
else:
_ = [self.visit(f) for f in phi.subterms]
def _visit_constraint_formula(self, phi):
if isinstance(phi, ltl.TemporalCompoundFormula) and phi.connective == ltl.TemporalConnective.X:
old_value = self.under_next
self.under_next = True
_ = [self.visit(f) for f in phi.subformulas]
self.under_next = old_value
elif isinstance(phi, CompoundFormula):
old_visited = self.visited.copy()
_ = [self.visit(f) for f in phi.subformulas]
delta = self.visited - old_visited
# print('Fluents: {}'.format([str(x) for x in self.fluents]))
# print('Delta: {}'.format([str(x) for x in delta]))
if any(f in self.fluents for f in delta):
# print("Fluency propagates")
for f in delta:
self.fluents.add(f)
elif isinstance(phi, QuantifiedFormula):
self.visit(phi.formula)
elif isinstance(phi, Atom):
if not phi.predicate.builtin:
self.visited.add(symref(phi))
if self.under_next:
if not phi.predicate.builtin:
self.fluents.add(symref(phi))
else:
self.statics.add(symref(phi))
_ = [self.visit(f) for f in phi.subterms]
elif isinstance(phi, CompoundTerm):
if not phi.symbol.builtin:
self.visited.add(symref(phi))
if self.under_next:
if not phi.symbol.builtin:
self.fluents.add(symref(phi))
else:
self.statics.add(symref(phi))
_ = [self.visit(f) for f in phi.subterms]
def _visit_precondition_formula(self, phi):
if isinstance(phi, CompoundFormula):
_ = [self.visit(f) for f in phi.subformulas]
elif isinstance(phi, QuantifiedFormula):
self.visit(phi.formula)
elif isinstance(phi, Atom):
self.statics.add(symref(phi))
elif isinstance(phi, CompoundTerm):
self.statics.add(symref(phi))
def visit(self, phi):
"""
Visitor method to sort atoms and terms into the
"fluent" and "static" categories. Note that a given
symbol can be in both sets, this means that it gets
"votes" as static and fluent... the post_process() method
is meant to settle the issue (and potentially allow for
more ellaborate/clever heuristics).
NB: at the moment we're trawling all (possibly lifted)
sub-expressions, this is intentional.
"""
if self.mode == FluentHeuristic.action_effects:
self._visit_action_effect_formula(phi)
elif self.mode == FluentHeuristic.constraint:
self._visit_constraint_formula(phi)
else:
assert self.mode == FluentHeuristic.precondition
self._visit_precondition_formula(phi)
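# Example usage sketch (illustrative, not part of the original module): ``lang`` is
# an existing tarski language and ``phi`` an action-effect formula taken from a
# problem definition.
#
#   fluents, statics = set(), set()
#   collector = FluentSymbolCollector(lang, fluents, statics,
#                                     FluentHeuristic.action_effects)
#   collector.visit(phi)
#   # ``fluents``/``statics`` now hold symrefs voted into each category; a later
#   # pass settles symbols that received votes in both sets.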
| 35.099237 | 103 | 0.594389 |
3ed76ba2b7d42660492afdccca06d4721336bbb4 | 39,920 | py | Python | trainer.py | AZdet/causal-infogan | 146b647863a27542ad4a1a01ddb033cdcab9843d | [
"MIT"
] | null | null | null | trainer.py | AZdet/causal-infogan | 146b647863a27542ad4a1a01ddb033cdcab9843d | [
"MIT"
] | null | null | null | trainer.py | AZdet/causal-infogan | 146b647863a27542ad4a1a01ddb033cdcab9843d | [
"MIT"
] | 1 | 2020-02-15T19:17:24.000Z | 2020-02-15T19:17:24.000Z | import numpy as np
import csv
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
from tqdm import tqdm
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from tensorboard_logger import configure, log_value
from collections import OrderedDict
from vpa.planning import plan_traj_astar, discretize, undiscretize
# from dataset import ImagePairs
from vpa.gcp_datasets import h36m, maze, maze1000, sawyer
from vpa.utils import plot_img, from_numpy_to_var, print_array, write_number_on_images, write_stats_from_var
from vpa.logger import Logger
from vpa.eval import EvalPSNR
class Trainer:
def __init__(self, G, D, Q, T, P, **kwargs):
# Models
self.G = G
self.D = D
self.Q = Q
self.T = T
self.P = P
self.classifier = kwargs['classifier']
self.fcn = kwargs.get('fcn', None)
# Weights
self.lr_g = kwargs['lr_g']
self.lr_d = kwargs['lr_d']
self.infow = kwargs['infow']
self.transw = kwargs['transw']
# Training hyperparameters
self.batch_size = 16
self.n_epochs = kwargs['n_epochs']
self.c_dim = kwargs['cont_code_dim']
self.rand_z_dim = kwargs['random_noise_dim']
self.channel_dim = kwargs['channel_dim']
self.latent_dim = self.c_dim + self.rand_z_dim
self.k = kwargs['k']
self.gray = kwargs['gray']
# Planning hyperparameters
self.planner = getattr(self, kwargs['planner'])
self.traj_eval_copies = kwargs['traj_eval_copies']
self.planning_epoch = kwargs['planning_epoch']
self.plan_length = kwargs['plan_length']
self.discretization_bins = 20
self.n_closest_iters = kwargs['n_closest_iters']
# Make directories
self.data_dir = kwargs['data_dir']
self.planning_data_dir = kwargs['planning_data_dir']
self.out_dir = kwargs['out_dir']
if not os.path.exists(self.out_dir):
os.makedirs(self.out_dir)
# TF logger.
self.logger = None
self.configure_logger()
self.log_dict = OrderedDict()
# Evaluation
self.test_sample_size = 12
self.test_num_codes = max(20, self.c_dim + 1)
self.test_size = self.test_sample_size * self.test_num_codes
self.eval_input = self._eval_noise()
if kwargs['dataset'] == 'h36m':
self.dataset = h36m.Dataset
elif kwargs['dataset'] == 'maze':
self.dataset = maze.Dataset
elif kwargs['dataset'] == 'maze1000':
self.dataset = maze1000.Dataset
elif kwargs['dataset'] == 'sawyer':
self.dataset = sawyer.Dataset
def configure_logger(self):
self.logger = Logger(os.path.join(self.out_dir, "log"))
configure(os.path.join(self.out_dir, "log"), flush_secs=5)
def _noise_sample(self, z, bs):
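        """
        Sample one transition in code space: draw c from the prior P, map it to
        c_next with the learned transition T, and refill z in-place with standard
        Gaussian noise.
        """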
c = self.P.sample(bs)
c_next = self.T(c)
z.data.normal_(0, 1)
return z, c, c_next
def _eval_noise(self):
'''
        :return: z (sample_size x num_codes x z_dim), c (sample_size x num_codes x c_dim)
'''
more_codes = self.test_num_codes - (self.c_dim + 1)
# c = Variable(torch.cuda.FloatTensor([[j<i for j in range(self.disc_c_dim)] for i in range(min(self.test_num_codes, self.disc_c_dim+1))]))
c = Variable(torch.cuda.FloatTensor(
[[j < i for j in range(self.c_dim)] for i in range(min(self.test_num_codes, self.c_dim + 1))])) * (
self.P.unif_range[1] - self.P.unif_range[0]) + self.P.unif_range[0]
if more_codes > 0:
c = torch.cat([c, self.P.sample(more_codes)], 0)
self.eval_c = c
z = Variable(torch.FloatTensor(self.test_sample_size, self.rand_z_dim).normal_(0, 1).cuda())
plot_img(c.t().detach().cpu(),
os.path.join(self.out_dir, 'gen', 'eval_code.png'),
vrange=self.P.unif_range)
return z[:, None, :].repeat(1, self.test_num_codes, 1).view(-1, self.rand_z_dim), \
c.repeat(1, 1, self.test_sample_size).permute(2, 0, 1).contiguous().view(-1, self.c_dim)
def get_c_next(self, epoch):
c_next = self.T(self.eval_c)
plot_img(c_next.t().detach().cpu(),
os.path.join(self.out_dir, 'gen', 'eval_code_next_%d.png' % epoch),
vrange=self.P.unif_range)
return c_next.repeat(1, 1, self.test_sample_size).permute(2, 0, 1).contiguous().view(-1, self.c_dim)
def apply_fcn_mse(self, img):
o = self.fcn(Variable(img).cuda()).detach()
return torch.clamp(2 * (o - 0.5), -1 + 1e-3, 1 - 1e-3)
# return torch.clamp(2.6*(o - 0.5), -1 + 1e-3, 1 - 1e-3)
def preprocess_function(self, state):
return discretize(state, self.discretization_bins, self.P.unif_range)
def discriminator_function(self, obs, obs_next):
out = self.classifier(obs, obs_next)
return out.detach().cpu().numpy()
def discriminator_function_np(self, obs, obs_next):
return self.discriminator_function(from_numpy_to_var(obs),
from_numpy_to_var(obs_next))
def continuous_transition_function(self, c_):
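        """
        Transition used by the planner's discretized code space: map discrete codes
        back to the continuous range, apply the learned transition T, clip to
        P.unif_range, and re-discretize into `discretization_bins` bins.
        """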
c_ = undiscretize(c_, self.discretization_bins, self.P.unif_range)
c_next_ = self.T(from_numpy_to_var(c_)).data.cpu().numpy()
c_next_ = np.clip(c_next_, self.P.unif_range[0] + 1e-6, self.P.unif_range[1] - 1e-6)
c_next_d = discretize(c_next_, self.discretization_bins, self.P.unif_range)
return c_next_d
def conditional_generator_function(self, c_, c_next_, obs):
'''
This doesn't do anything.
'''
c_ = undiscretize(c_, self.discretization_bins, self.P.unif_range)
c_next_ = undiscretize(c_next_, self.discretization_bins, self.P.unif_range)
z_ = from_numpy_to_var(np.random.randn(c_.shape[0], self.rand_z_dim))
_, next_observation = self.G(z_, from_numpy_to_var(c_), from_numpy_to_var(c_next_))
return next_observation.data.cpu().numpy()
def train(self):
# Set up training.
criterionD = nn.BCELoss().cuda()
optimD = optim.Adam([{'params': self.D.parameters()}], lr=self.lr_d,
betas=(0.5, 0.999))
optimG = optim.Adam([{'params': self.G.parameters()},
{'params': self.Q.parameters()},
{'params': self.T.parameters()}], lr=self.lr_g,
betas=(0.5, 0.999))
############################################
# Load rope dataset and apply transformations
rope_path = os.path.realpath(self.data_dir)
trans = [
transforms.Resize(64),
transforms.CenterCrop(64),
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
if not self.fcn:
# If fcn it will do the transformation to gray
# and normalize in the loop.
trans.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
if self.gray:
# Apply grayscale transformation.
trans.append(lambda x: x.mean(dim=0)[None, :, :])
trans_comp = transforms.Compose(trans)
# Image 1 and image 2 are k steps apart.
dataset = self.dataset(phase='train', mode='train')
self.plan_length = dataset.spec.max_seq_len - 3
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=2,
drop_last=True)
############################################
# Load eval plan dataset
planning_data_dir = self.planning_data_dir
dataset_plan = self.dataset(phase='val', mode='plan')
data_plan_loader = torch.utils.data.DataLoader(dataset_plan,
batch_size=1,
shuffle=False,
num_workers=4,
drop_last=True)
# dataset_start = self.dataset(phase='val', mode='start')
# dataset_goal = self.dataset(phase='val', mode='goal')
# data_start_loader = torch.utils.data.DataLoader(dataset_start,
# batch_size=1,
# shuffle=False,
# num_workers=1,
# drop_last=True)
# data_goal_loader = torch.utils.data.DataLoader(dataset_goal,
# batch_size=1,
# shuffle=False,
# num_workers=1,
# drop_last=True)
############################################
real_o = Variable(torch.FloatTensor(self.batch_size, 3, dataset.img_sz, dataset.img_sz).cuda(), requires_grad=False)
real_o_next = Variable(torch.FloatTensor(self.batch_size, 3, dataset.img_sz, dataset.img_sz).cuda(), requires_grad=False)
label = Variable(torch.FloatTensor(self.batch_size).cuda(), requires_grad=False)
z = Variable(torch.FloatTensor(self.batch_size, self.rand_z_dim).cuda(), requires_grad=False)
for epoch in range(self.n_epochs + 1):
self.G.train()
self.D.train()
self.Q.train()
self.T.train()
for num_iters, batch_data in tqdm(enumerate(dataloader, 0)):
# break
# Real data
o, _ = batch_data[0]
o_next, _ = batch_data[1]
bs = o.size(0)
real_o.data.resize_(o.size())
real_o_next.data.resize_(o_next.size())
label.data.resize_(bs)
real_o.data.copy_(o)
real_o_next.data.copy_(o_next)
if self.fcn:
real_o = self.apply_fcn_mse(o)
real_o_next = self.apply_fcn_mse(o_next)
if real_o.abs().max() > 1:
import ipdb;
ipdb.set_trace()
assert real_o.abs().max() <= 1
if epoch == 0:
break
############################################
# D Loss (Update D)
optimD.zero_grad()
# Real data
probs_real = self.D(real_o, real_o_next)
label.data.fill_(1)
loss_real = criterionD(probs_real, label)
loss_real.backward()
# Fake data
z, c, c_next = self._noise_sample(z, bs)
fake_o, fake_o_next = self.G(z, c, c_next)
probs_fake = self.D(fake_o.detach(), fake_o_next.detach())
label.data.fill_(0)
loss_fake = criterionD(probs_fake, label)
loss_fake.backward()
D_loss = loss_real + loss_fake
optimD.step()
############################################
# G loss (Update G)
optimG.zero_grad()
probs_fake_2 = self.D(fake_o, fake_o_next)
label.data.fill_(1)
G_loss = criterionD(probs_fake_2, label)
# Q loss (Update G, T, Q)
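                # InfoGAN-style mutual-information term: ent_loss estimates H(c) and
                # crossent_loss estimates E[-log Q(c | G(z, c))], so minimizing
                # mi_loss = crossent_loss - ent_loss maximizes the variational lower bound
                # E[log Q(c | G(z, c))] + H(c) <= I(c; G(z, c)); the same bound is applied
                # to the successor pair (c_next, fake_o_next).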
ent_loss = -self.P.log_prob(c).mean(0)
crossent_loss = -self.Q.log_prob(fake_o, c).mean(0)
crossent_loss_next = -self.Q.log_prob(fake_o_next, c_next).mean(0)
# trans_prob = self.T.get_prob(Variable(torch.eye(self.dis_c_dim).cuda()))
ent_loss_next = -self.T.log_prob(c, None, c_next).mean(0)
mi_loss = crossent_loss - ent_loss
mi_loss_next = crossent_loss_next - ent_loss_next
Q_loss = mi_loss + mi_loss_next
# T loss (Update T)
Q_c_given_x, Q_c_given_x_var = (i.detach() for i in self.Q.forward(real_o))
t_mu, t_variance = self.T.get_mu_and_var(c)
t_diff = t_mu - c
# Keep the variance small.
# TODO: add loss on t_diff
T_loss = (t_variance ** 2).sum(1).mean(0)
(G_loss +
self.infow * Q_loss +
self.transw * T_loss).backward()
optimG.step()
#############################################
# Logging (iteration)
if num_iters % 100 == 0:
self.log_dict['Dloss'] = D_loss.item()
self.log_dict['Gloss'] = G_loss.item()
self.log_dict['Qloss'] = Q_loss.item()
self.log_dict['Tloss'] = T_loss.item()
self.log_dict['mi_loss'] = mi_loss.item()
self.log_dict['mi_loss_next'] = mi_loss_next.item()
self.log_dict['ent_loss'] = ent_loss.item()
self.log_dict['ent_loss_next'] = ent_loss_next.item()
self.log_dict['crossent_loss'] = crossent_loss.item()
self.log_dict['crossent_loss_next'] = crossent_loss_next.item()
self.log_dict['D(real)'] = probs_real.data.mean()
self.log_dict['D(fake)_before'] = probs_fake.data.mean()
self.log_dict['D(fake)_after'] = probs_fake_2.data.mean()
write_stats_from_var(self.log_dict, Q_c_given_x, 'Q_c_given_real_x_mu')
write_stats_from_var(self.log_dict, Q_c_given_x, 'Q_c_given_real_x_mu', idx=0)
write_stats_from_var(self.log_dict, Q_c_given_x_var, 'Q_c_given_real_x_variance')
write_stats_from_var(self.log_dict, Q_c_given_x_var, 'Q_c_given_real_x_variance', idx=0)
write_stats_from_var(self.log_dict, t_mu, 't_mu')
write_stats_from_var(self.log_dict, t_mu, 't_mu', idx=0)
write_stats_from_var(self.log_dict, t_diff, 't_diff')
write_stats_from_var(self.log_dict, t_diff, 't_diff', idx=0)
write_stats_from_var(self.log_dict, t_variance, 't_variance')
write_stats_from_var(self.log_dict, t_variance, 't_variance', idx=0)
print('\n#######################'
'\nEpoch/Iter:%d/%d; '
'\nDloss: %.3f; '
'\nGloss: %.3f; '
'\nQloss: %.3f, %.3f; '
'\nT_loss: %.3f; '
'\nEnt: %.3f, %.3f; '
'\nCross Ent: %.3f, %.3f; '
'\nD(x): %.3f; '
'\nD(G(z)): b %.3f, a %.3f;'
'\n0_Q_c_given_rand_x_mean: %.3f'
'\n0_Q_c_given_rand_x_std: %.3f'
'\n0_Q_c_given_fixed_x_std: %.3f'
'\nt_diff_abs_mean: %.3f'
'\nt_std_mean: %.3f'
% (epoch, num_iters,
D_loss.item(),
G_loss.item(),
mi_loss.item(), mi_loss_next.item(),
T_loss.item(),
ent_loss.item(), ent_loss_next.item(),
crossent_loss.item(), crossent_loss_next.item(),
probs_real.data.mean(),
probs_fake.data.mean(), probs_fake_2.data.mean(),
Q_c_given_x[:, 0].cpu().numpy().mean(),
Q_c_given_x[:, 0].cpu().numpy().std(),
np.sqrt(Q_c_given_x_var[:, 0].cpu().numpy().mean()),
t_diff.data.abs().mean(),
t_variance.data.sqrt().mean(),
))
#############################################
# Start evaluation from here.
self.G.eval()
self.D.eval()
self.Q.eval()
self.T.eval()
#############################################
# Save images
# Plot fake data
x_save, x_next_save = self.G(*self.eval_input, self.get_c_next(epoch))
save_image(x_save.data,
os.path.join(self.out_dir, 'gen', 'curr_samples_%03d.png' % epoch),
nrow=self.test_num_codes,
normalize=True)
save_image(x_next_save.data,
os.path.join(self.out_dir, 'gen', 'next_samples_%03d.png' % epoch),
nrow=self.test_num_codes,
normalize=True)
save_image((x_save - x_next_save).data,
os.path.join(self.out_dir, 'gen', 'diff_samples_%03d.png' % epoch),
nrow=self.test_num_codes,
normalize=True)
# Plot real data.
if epoch % 10 == 0:
save_image(real_o.data,
os.path.join(self.out_dir, 'real', 'real_samples_%d.png' % epoch),
nrow=self.test_num_codes,
normalize=True)
save_image(real_o_next.data,
os.path.join(self.out_dir, 'real', 'real_samples_next_%d.png' % epoch),
nrow=self.test_num_codes,
normalize=True)
#############################################
# Save parameters
if epoch % 5 == 0:
if not os.path.exists('%s/var' % self.out_dir):
os.makedirs('%s/var' % self.out_dir)
for i in [self.G, self.D, self.Q, self.T]:
torch.save(i.state_dict(),
os.path.join(self.out_dir,
'var',
'%s_%d' % (i.__class__.__name__, epoch,
)))
#############################################
# Logging (epoch)
for k, v in self.log_dict.items():
log_value(k, v, epoch)
if epoch > 0:
# tf logger
# log_value('avg|x_next - x|', (x_next_save.data - x_save.data).abs().mean(dim=0).sum(), epoch + 1)
# self.logger.histo_summary("Q_c_given_x", Q_c_given_x.data.cpu().numpy().reshape(-1), step=epoch)
# self.logger.histo_summary("Q_c0_given_x", Q_c_given_x[:, 0].data.cpu().numpy(), step=epoch)
# self.logger.histo_summary("Q_c_given_x_var", Q_c_given_x_var.cpu().numpy().reshape(-1), step=epoch)
# self.logger.histo_summary("Q_c0_given_x_var", Q_c_given_x_var[:, 0].data.cpu().numpy(), step=epoch)
# csv log
with open(os.path.join(self.out_dir, 'progress.csv'), 'a') as csv_file:
writer = csv.writer(csv_file)
if epoch == 1:
writer.writerow(["epoch"] + list(self.log_dict.keys()))
writer.writerow(["%.3f" % _tmp for _tmp in [epoch] + list(self.log_dict.values())])
#############################################
# Do planning?
if self.plan_length <= 0 or epoch not in self.planning_epoch:
continue
print("\n#######################"
"\nPlanning")
#############################################
# Showing plans on real images using best code.
# Min l2 distance from start and goal real images.
evaluator = EvalPSNR(2)
plans = []
datas = []
for i, data in enumerate(data_plan_loader):
plan = self.plan_hack(i, data[:, 0], data[:, -1], epoch, 'L2', data.shape[1] - 3, save=False)
evaluator(plan[None].cpu().numpy(), data.cpu().numpy())
print(evaluator.PSNR(), evaluator.SSIM())
# if i < 4:
# self.make_gif(torch.cat([data[0], plan.cpu()], dim=2), i, epoch)
plans.append(plan.cpu())
datas.append(data[0])
if i == 3:
for i in range(4): datas[i] = np.concatenate(
[datas[i], np.zeros([100 - datas[i].shape[0]] + list(datas[i].shape[1:]))], 0)
for i in range(4): plans[i] = np.concatenate([plans[i], torch.zeros([100 - plans[i].shape[0]] + list(plans[i].shape[1:]))], 0)
data = np.concatenate(datas, 3)
plan = np.concatenate(plans, 3)
self.make_gif(torch.from_numpy(np.concatenate([data, plan], 2)), i, epoch, fps=4)
import pdb; pdb.set_trace()
print(('Test: [{0}/{1}]\t'
'PSNR {PSNR:.3f}'
'SSIM {SSIM:.3f}'.format(
i,
len(data_plan_loader),
PSNR=evaluator.PSNR(),
SSIM=evaluator.SSIM())))
# # Min classifier distance from start and goal real images.
# self.plan_hack(data_start_loader,
# data_goal_loader,
# epoch,
# 'classifier')
#############################################
# Visual Planning
def plan_hack(self,
i,
start_img,
goal_img,
epoch,
metric,
length,
save=True, # TODO implement
keep_best=1):
"""
Generate visual plans from starts to goals.
First, find the closest codes for starts and goals.
Then, generate the plans in the latent space.
Finally, map the latent plans to visual plans and use the classifier to pick the top K.
        The start and goal images are passed in directly as `start_img` and `goal_img`.
        :param i: index of the start/goal pair (used in cache and output file names)
        :param start_img: start image tensor
        :param goal_img: goal image tensor
        :param epoch: current epoch (used in output file names)
        :param metric: 'L2', 'classifier' or 'D' distance used to find the closest codes
        :param length: number of planning steps in the latent space
        :param save: whether to cache the fitted codes and save the rendered plan
        :param keep_best: number of best-scoring trajectories kept when rendering
        :return: the rendered plan (stacked image tensor)
"""
all_confidences = []
c_start = None
est_start_obs = None
# for start_img in data_start_loader:
if self.fcn:
start_obs = self.apply_fcn_mse(start_img)
else:
start_obs = Variable(start_img).cuda()
pt_start = os.path.join(self.out_dir, 'plans', 'c_min_start_%s_%i.pt' % (metric, i))
if os.path.exists(pt_start) and save:
z_start, c_start, _, est_start_obs = torch.load(pt_start)
else:
z_start, c_start, _, est_start_obs = self.closest_code(start_obs,
400,
False,
metric, 1)
if save: torch.save([z_start, c_start, _, est_start_obs], pt_start)
# Hacky for now
try:
c_start = Variable(c_start)
est_start_obs = Variable(est_start_obs)
except RuntimeError:
pass
# for i, goal_img in enumerate(data_goal_loader, 0):
if self.fcn:
goal_obs = self.apply_fcn_mse(goal_img)
else:
goal_obs = Variable(goal_img).cuda()
pt_goal = os.path.join(self.out_dir, 'plans', 'c_min_goal_%s_%d_epoch_%d.pt' % (metric, i, epoch))
if os.path.exists(pt_goal) and save:
z_goal, _, c_goal, est_goal_obs = torch.load(pt_goal)
else:
z_goal, _, c_goal, est_goal_obs = self.closest_code(goal_obs,
400,
True,
metric, 1)
if save: torch.save([z_goal, _, c_goal, est_goal_obs], pt_goal)
# Hacky for now
try:
c_goal = Variable(c_goal)
est_goal_obs = Variable(est_goal_obs)
except RuntimeError:
pass
# Plan using c_start and c_goal.
rollout = self.planner(c_start.repeat(self.traj_eval_copies, 1),
c_goal.repeat(self.traj_eval_copies, 1),
length,
start_obs=start_obs,
goal_obs=goal_obs)
# Insert real start and goal.
rollout.insert(0, est_start_obs.repeat(self.traj_eval_copies, 1, 1, 1))
rollout.append(est_goal_obs.repeat(self.traj_eval_copies, 1, 1, 1))
rollout_best_k, confidences = self.get_best_k(rollout, 1)
rollout_data = torch.stack(rollout_best_k, dim=0)
masks = - np.ones([rollout_data.size()[0], keep_best, self.channel_dim] + list(start_img.shape[-2:]),
dtype=np.float32)
# write_number_on_images(masks, confidences)
pd = torch.max(rollout_data, from_numpy_to_var(masks))\
.permute(1, 0, 2, 3, 4).contiguous().view([-1, self.channel_dim] + list(start_img.shape[-2:]))
# confidences.T has size keep_best x rollout length
all_confidences.append(confidences.T[-1][:-1])
if save:
save_image(pd.data,
os.path.join(self.out_dir, 'plans', '%s_min_%s_%d_epoch_%d.png'
% (self.planner.__name__, metric, i, epoch)),
nrow=int(pd.size()[0] / keep_best),
normalize=True)
self.save_img(start_obs, os.path.join(self.out_dir, 'plans/im0.png'))
# After the loop
# all_confidences = np.stack(all_confidences)
# print((all_confidences[:, 0] > 0.9).sum(), (all_confidences[:, -1] > 0.9).sum())
# import pickle as pkl
# with open(os.path.join(self.out_dir, 'all_confidences.pkl'), 'wb') as f:
# pkl.dump(all_confidences, f)
# import matplotlib.pyplot as plt
# plt.boxplot([all_confidences.mean(1), all_confidences[all_confidences[:, -1] > 0.9].mean(1)])
# plt.savefig(os.path.join(self.out_dir, 'boxplot.png'))
return pd
def save_img(self, img, name):
import skimage
skimage.io.imsave(name, img.detach().cpu().numpy()[0].transpose((1, 2, 0)))
def make_gif(self, plan, i, epoch, fps):
from recursive_planning.infra.utils.create_gif_lib import npy_to_gif, npy_to_mp4
filename = self.out_dir + '/plans/gif_{}_{}'.format(i, epoch)
x = plan.detach().cpu().numpy()
npy_to_gif(list(((x.transpose([0, 2, 3, 1]) + 1) * 127.5).astype(np.uint8)), filename, fps=fps)
# def plan(self,
# dataloader,
# epoch,
# metric,
# keep_best=10):
# """
# Generate visual plans from starts to goals.
# First, find the closest codes for starts and goals.
# Then, generate the plans in the latent space.
# Finally, map the latent plans to visual plans and use the classifier to pick the top K.
# The start image is loaded from data_start_loader. The goal image is loaded from data_goal_loader.
# :param data_start_loader:
# :param data_goal_loader:
# :param epoch:
# :param metric:
# :param keep_best:
# :return:
# """
#
# for i, pair in enumerate(dataloader, 0):
# if self.fcn:
# start_obs = self.apply_fcn_mse(pair[0][0])
# goal_obs = self.apply_fcn_mse(pair[1][0])
#
# # Compute c_start and c_goal
# pt_path = os.path.join(self.out_dir, 'plans', 'c_min_%s_%d_epoch_%d.pt' % (metric, i, epoch))
# if os.path.exists(pt_path):
# c_start, c_goal, est_start_obs, est_goal_obs = torch.load(pt_path)
# else:
# _, c_start, _, est_start_obs = self.closest_code(start_obs,
# 400,
# False,
# metric, 1)
# _, _, c_goal, est_goal_obs = self.closest_code(goal_obs,
# 400,
# True,
# metric, 1)
# # _, c_start, _, est_start_obs = self.closest_code(start_obs,
# # self.traj_eval_copies,
# # False,
# # metric, 0)
# # _, _, c_goal, est_goal_obs = self.closest_code(goal_obs,
# # self.traj_eval_copies,
# # True,
# # metric, 0)
# torch.save([c_start, c_goal, est_start_obs, est_goal_obs], pt_path)
#
# # Plan using c_start and c_goal.
# rollout = self.planner(c_start.repeat(self.traj_eval_copies, 1),
# c_goal.repeat(self.traj_eval_copies, 1),
# start_obs=start_obs,
# goal_obs=goal_obs)
#
# # Insert closest start and goal.
# rollout.insert(0, est_start_obs.repeat(self.traj_eval_copies, 1, 1, 1))
# rollout.append(est_goal_obs.repeat(self.traj_eval_copies, 1, 1, 1))
#
# # Insert real start and goal.
# rollout.insert(0, start_obs.repeat(self.traj_eval_copies, 1, 1, 1))
# rollout.append(goal_obs.repeat(self.traj_eval_copies, 1, 1, 1))
#
# rollout_best_k, confidences = self.get_best_k(rollout, keep_best)
# rollout_data = torch.stack(rollout_best_k, dim=0)
#
# masks = - np.ones((rollout_data.size()[0], keep_best, self.channel_dim, 64, 64),
# dtype=np.float32)
# write_number_on_images(masks, confidences)
#
# # save_image(torch.max(rollout_data, from_numpy_to_var(masks)).view(-1, self.channel_dim, 64, 64).data,
# # os.path.join(self.out_dir, 'plans', '%s_min_%s_%d_epoch_%d.png'
# # % (self.planner.__name__, metric, i, epoch)),
# # nrow=keep_best,
# # normalize=True)
#
# pd = torch.max(rollout_data, from_numpy_to_var(masks)).permute(1, 0, 2, 3, 4).contiguous().view(-1, self.channel_dim, 64, 64)
#
# save_image(pd.data,
# os.path.join(self.out_dir, 'plans', '%s_min_%s_%d_epoch_%d.png'
# % (self.planner.__name__, metric, i, epoch)),
# nrow=int(pd.size()[0] / keep_best),
# normalize=True)
def get_best_k(self, rollout, keep_best=10):
"""
Evaluate confidence using discriminator.
:param rollout: (list) n x (torch) channel size x W x H
:param keep_best: get the best keep_best scores.
:return: rollout list size n x (torch) keep_best x channel size x W x H,
confidence np size n-1 x keep_best
"""
confidences = [self.D(rollout[i], rollout[i + 1]).reshape(-1).detach().cpu().numpy() for i in
range(len(rollout) - 1)]
np_confidences = np.array(confidences)
        # average the confidences along the trajectory (mean, not min, despite the variable name)
min_confidences = np.mean(np_confidences, axis=0)
# sort according to confidence
sort_ind = np.argsort(min_confidences, axis=0)
rollout = [r[sort_ind[-keep_best:]] for r in rollout]
# confidences = [c[sort_ind[-keep_best:]] for c in confidences]
np_confidences = np.concatenate([np_confidences[:, sort_ind[-keep_best:]],
np.zeros((1, keep_best))], 0)
return rollout, np_confidences
def closest_code(self, obs, n_trials, use_second, metric, regress_bs, verbose=True):
"""
Get the code that generates an image with closest distance to obs.
:param obs: 1 x channel_dim x img_W x img_H
:param n_trials: number of copies to search
:param use_second: bool, to measure distance using the second image
        :param metric: str, one of 'L2', 'classifier' or 'D', used to measure distance
:param regress_bs: int, regression batch size when 0 do just sampling.
:return: the best noise and codes
"""
if metric == 'L2':
f = lambda x, y: ((x - y) ** 2).view(n_trials, -1).sum(1)
elif metric == 'classifier':
f = lambda x, y: - self.classifier(x, y).view(-1) + ((x - y) ** 2).view(n_trials, -1).sum(1) / 10
else:
assert metric == 'D'
# turned max into min using minus.
f = lambda x, y: - self.D(x, y).view(-1)
if regress_bs:
z_var = Variable(0.1 * torch.randn(n_trials, self.rand_z_dim).cuda(), requires_grad=True)
c_var = Variable(0.1 * torch.randn(n_trials, self.c_dim).cuda(), requires_grad=True)
# c_var = Variable(self.Q.forward_soft(self.FE(obs.repeat(n_trials, 1, 1, 1))).data, requires_grad=True)
optimizer = optim.Adam([c_var, z_var], lr=1e-2)
n_iters = self.n_closest_iters
for i in range(n_iters):
optimizer.zero_grad()
if self.planner == self.astar_plan:
c = F.tanh(c_var.repeat(regress_bs, 1))
else:
c = c_var.repeat(regress_bs, 1)
_z = z_var.repeat(regress_bs, 1)
c_next = self.T(c)
o, o_next = self.G(_z, c, c_next)
if use_second:
out = o_next
else:
out = o
dist = f(obs.repeat(n_trials * regress_bs, 1, 1, 1), out).sum(0) / regress_bs
if i % 100 == 0:
print("\t Closest code (%d/%d): %.3f" % (i, n_iters, dist))
dist.backward()
optimizer.step()
_z = z_var.detach()
if self.planner == self.astar_plan:
c = F.tanh(c_var.detach())
else:
c = c_var.detach()
else:
_z = Variable(torch.randn(n_trials, self.rand_z_dim)).cuda()
c = self.Q.forward_soft(self.FE(obs)).repeat(n_trials, 1)
# Select best c and c_next from different initializations.
if self.planner == self.astar_plan:
c_next = torch.clamp(self.T(c), -1 + 1e-3, 1 - 1e-3)
else:
c_next = self.T(c)
o, o_next = self.G(_z, c, c_next)
if use_second:
out = o_next
else:
out = o
dist = f(obs.repeat(n_trials, 1, 1, 1), out)
min_dist, min_idx = dist.min(0)
if verbose:
# import ipdb; ipdb.set_trace()
print("\t best_c: %s" % print_array(c[min_idx.item()].data))
print("\t best_c_next: %s" % print_array(c_next[min_idx.item()].data))
print('\t %s measure: %.3f' % (metric, min_dist))
return _z[min_idx].detach(), c[min_idx].detach(), c_next[min_idx].detach(), out[min_idx].detach()
def simple_plan(self, c_start, c_goal, length, verbose=True, **kwargs):
"""
Generate a plan in observation space given start and goal states via interpolation.
:param c_start: bs x c_dim
:param c_goal: bs x c_dim
:return: rollout: horizon x bs x channel_dim x img_W x img_H
"""
with torch.no_grad():
rollout = []
_z = Variable(torch.randn(c_start.size()[0], self.rand_z_dim)).cuda()
for t in range(length):
c = c_start + (c_goal - c_start) * t / length
c_next = c_start + (c_goal - c_start) * (t + 1) / length
# _z = Variable(torch.randn(c.size()[0], self.rand_z_dim)).cuda()
_cur_img, _next_img = self.G(_z, c, c_next)
if t == 0:
rollout.append(_cur_img)
next_img = _next_img
rollout.append(next_img)
if verbose:
# import ipdb; ipdb.set_trace()
print("\t c_%d: %s" % (t, print_array(c[0].data)))
# print("\t Transition var: %s" % print_array(self.T.get_var(c_start[0, None]).data[0]))
# print("\t Direction: %s" % print_array((c_goal-c_start).data[0]/self.planning_horizon))
return rollout
def astar_plan(self, c_start, c_goal, verbose=True, **kwargs):
"""
Generate a plan in observation space given start and goal states via A* search.
:param c_start: bs x c_dim
:param c_goal: bs x c_dim
:return: rollout: horizon x bs x channel_dim x img_W x img_H
"""
with torch.no_grad():
rollout = []
# _z = Variable(torch.randn(c_start.size()[0], self.rand_z_dim)).cuda()
bs = c_start.size()[0]
traj = plan_traj_astar(
kwargs['start_obs'],
kwargs['goal_obs'],
start_state=c_start[0].data.cpu().numpy(),
goal_state=c_goal[0].data.cpu().numpy(),
transition_function=self.continuous_transition_function,
preprocess_function=self.preprocess_function,
discriminator_function=self.discriminator_function_np,
generator_function=self.conditional_generator_function)
for t, disc in enumerate(traj[:-1]):
state = undiscretize(disc.state, self.discretization_bins, self.P.unif_range)
state_next = undiscretize(traj[t + 1].state, self.discretization_bins, self.P.unif_range)
c = from_numpy_to_var(state).repeat(bs, 1)
c_next = from_numpy_to_var(state_next).repeat(bs, 1)
_z = Variable(torch.randn(c.size()[0], self.rand_z_dim)).cuda()
_cur_img, _next_img = self.G(_z, c, c_next)
if t == 0:
rollout.append(_cur_img)
next_img = _next_img
rollout.append(next_img)
if verbose:
# import ipdb; ipdb.set_trace()
print("\t c_%d: %s" % (t, print_array(c[0].data)))
return rollout
| 47.242604 | 147 | 0.500777 |
bead85c2db00d94e176879ac706fbcf051df380e | 2,509 | py | Python | wiretap/__init__.py | CBSDigital/Hiero-Wiretap | c1326382b3c8b111ad682ccaec22990b409aff08 | [
"BSD-3-Clause"
] | 5 | 2016-11-02T16:27:38.000Z | 2018-05-08T06:27:29.000Z | wiretap/__init__.py | CBSDigital/Hiero-Wiretap | c1326382b3c8b111ad682ccaec22990b409aff08 | [
"BSD-3-Clause"
] | null | null | null | wiretap/__init__.py | CBSDigital/Hiero-Wiretap | c1326382b3c8b111ad682ccaec22990b409aff08 | [
"BSD-3-Clause"
] | null | null | null | """Loads the platform-appropriate Wiretap Python bindings.
@details The Wiretap Python extension requires the Boost.Python and dynamic
Wiretap libraries that were both compiled for the current platform and
Python version.
@note Autodesk refers to "Wiretap" in their SDK Documentation and product
materials, while using the convention "WireTap" in their API.
@note For autocompletion in Eclipse, add the folder containing the proper
version of libwiretapPythonClientAPI that was compiled for the currently
selected interpreter to the PYTHONPATH - External Libraries list.
"""
import os.path
import platform
import sys
LIBNAME = 'Wiretap'
def GetLibraryDirPath():
osAbbrev = {
'Windows': 'win',
'Microsoft': 'win',
'Darwin': 'osx',
'Linux': 'linux'
}
systemOS = platform.system()
if sys.maxsize <= 2**32:
arch = 32
else:
arch = 64
# Check whether the OS is in the abbreviation table
try:
platformFolder = osAbbrev[systemOS] + str(arch)
except KeyError:
msg = ("The {0} Python bindings are not available on the {1} "
"operating system.").format(LIBNAME, systemOS)
return '', msg
# Check whether there is a folder for the platform
pkgPath = os.path.dirname(__file__)
curPath = os.path.join(pkgPath, platformFolder)
if not os.path.isdir(curPath):
msg = (
"The {0} Python bindings have not yet been compiled for {1} "
"{2}-bit."
).format(LIBNAME, systemOS, arch)
return '', msg
# Check whether there is a folder for the current Python version
pythonFolder = 'py{0}{1}'.format(*sys.version_info[0:2])
pythonVersion = '{0}.{1}'.format(*sys.version_info[0:2])
curPath = os.path.join(curPath, pythonFolder)
if not os.path.isdir(curPath):
msg = (
"The {0} Python bindings have not yet been compiled for "
"Python {1} on {2} {3}-bit."
).format(LIBNAME, pythonVersion, systemOS, arch)
return '', msg
return curPath, ''
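# For example (hypothetical platforms): 64-bit Python 2.7 on Windows would resolve to
# "<package>/win64/py27", and 64-bit Python 2.6 on Linux to "<package>/linux64/py26".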
# TO DO: handle unsupported platform
__libDirPath, __errors = GetLibraryDirPath()
if __libDirPath:
if __libDirPath not in sys.path:
sys.path.append(__libDirPath)
else:
raise ImportError(__errors)
from libwiretapPythonClientAPI import *
class WireTapException(Exception):
pass
# Define here other classes/functions that are dependent on Wiretap Python API
| 29.517647 | 79 | 0.654444 |
ddd435c9593af3c813a55e095a7dd14fa5b59ae1 | 2,607 | py | Python | pyserum/market/types.py | dpguoming/pyserum | d0938e9149de67093808c4d89fee1eb6f75fcd52 | [
"MIT"
] | 129 | 2020-08-30T03:50:10.000Z | 2022-03-24T02:52:18.000Z | pyserum/market/types.py | dpguoming/pyserum | d0938e9149de67093808c4d89fee1eb6f75fcd52 | [
"MIT"
] | 87 | 2020-08-30T04:38:30.000Z | 2022-03-23T10:22:11.000Z | pyserum/market/types.py | dpguoming/pyserum | d0938e9149de67093808c4d89fee1eb6f75fcd52 | [
"MIT"
] | 51 | 2020-09-29T13:58:20.000Z | 2022-03-07T09:49:47.000Z | from __future__ import annotations
from typing import NamedTuple
from solana.publickey import PublicKey
from .._layouts.account_flags import ACCOUNT_FLAGS_LAYOUT
from ..enums import Side
class AccountFlags(NamedTuple):
initialized: bool = False
""""""
market: bool = False
""""""
open_orders: bool = False
""""""
request_queue: bool = False
""""""
event_queue: bool = False
""""""
bids: bool = False
""""""
asks: bool = False
""""""
@staticmethod
def from_bytes(buffer: bytes) -> AccountFlags:
con = ACCOUNT_FLAGS_LAYOUT.parse(buffer)
return AccountFlags(
initialized=con.initialized,
market=con.market,
open_orders=con.open_orders,
request_queue=con.request_queue,
event_queue=con.event_queue,
bids=con.bids,
asks=con.asks,
)
class FilledOrder(NamedTuple):
order_id: int
""""""
side: Side
""""""
price: float
""""""
size: float
""""""
fee_cost: int
""""""
class OrderInfo(NamedTuple):
price: float
""""""
size: float
""""""
price_lots: int
""""""
size_lots: int
""""""
class Order(NamedTuple):
order_id: int
""""""
client_id: int
""""""
open_order_address: PublicKey
""""""
open_order_slot: int
""""""
fee_tier: int
""""""
info: OrderInfo
""""""
side: Side
""""""
class ReuqestFlags(NamedTuple):
new_order: bool
cancel_order: bool
bid: bool
post_only: bool
ioc: bool
class Request(NamedTuple):
request_flags: ReuqestFlags
""""""
open_order_slot: int
""""""
fee_tier: int
""""""
max_base_size_or_cancel_id: int
""""""
native_quote_quantity_locked: int
""""""
order_id: int
""""""
open_orders: PublicKey
""""""
client_order_id: int
""""""
class EventFlags(NamedTuple):
fill: bool
out: bool
bid: bool
maker: bool
class Event(NamedTuple):
event_flags: EventFlags
""""""
open_order_slot: int
""""""
fee_tier: int
""""""
native_quantity_released: int
""""""
native_quantity_paid: int
""""""
native_fee_or_rebate: int
""""""
order_id: int
""""""
public_key: PublicKey
""""""
client_order_id: int
""""""
class MarketInfo(NamedTuple):
name: str
""""""
address: PublicKey
""""""
program_id: PublicKey
""""""
class TokenInfo(NamedTuple):
name: str
""""""
address: PublicKey
""""""
| 17.264901 | 57 | 0.553893 |
6003e3cc03352ecb5590280b920b34917d2d7722 | 10,282 | py | Python | 7-4.py | demogest/Python_Practice | 6d074d694e761e0ba390252fbeb1092bb21d26e1 | [
"MIT"
] | null | null | null | 7-4.py | demogest/Python_Practice | 6d074d694e761e0ba390252fbeb1092bb21d26e1 | [
"MIT"
] | null | null | null | 7-4.py | demogest/Python_Practice | 6d074d694e761e0ba390252fbeb1092bb21d26e1 | [
"MIT"
] | null | null | null | from tkinter import *
# import ttk
from tkinter import ttk
from tkinter import colorchooser
import math
class App:
def __init__(self, master):
self.master = master
        # Initial outline (border) width
        self.width = IntVar()
        self.width.set(1)
        # Initial outline color
        self.outline = 'black'
        # Initial fill color
        self.fill = None
        # x, y coordinates of the previous point while dragging
        self.prevx = self.prevy = -10
        # x, y coordinates of the first point when a drag starts
        self.firstx = self.firsty = -10
        # x, y coordinates of the previous point when moving an item with the right button
        self.mv_prevx = self.mv_prevy = -10
        # item_type records which kind of shape to draw
self.item_type = 0
self.points = []
self.init_widgets()
self.temp_item = None
self.temp_items = []
        # The currently selected canvas item
self.choose_item = None
    # Create the UI widgets
def init_widgets(self):
self.cv = Canvas(root, background='white')
self.cv.pack(fill=BOTH, expand=True)
        # Bind handlers for left-button drag and left-button release events
        self.cv.bind('<B1-Motion>', self.drag_handler)
        self.cv.bind('<ButtonRelease-1>', self.release_handler)
        # Bind a handler for the left-button double-click event
self.cv.bind('<Double-1>', self.double_handler)
f = ttk.Frame(self.master)
f.pack(fill=X)
self.bns = []
        # Create one button per shape type in a loop
        for i, lb in enumerate(('Line', 'Rectangle', 'Ellipse', 'Polygon', 'Pencil')):
bn = Button(f, text=lb, command=lambda i=i: self.choose_type(i))
bn.pack(side=LEFT, ipadx=8, ipady=5, padx=5)
self.bns.append(bn)
        # Select the line tool by default
self.bns[self.item_type]['relief'] = SUNKEN
        ttk.Button(f, text='Outline Color',
                   command=self.choose_outline).pack(side=LEFT, ipadx=8, ipady=5, padx=5)
        ttk.Button(f, text='Fill Color',
                   command=self.choose_fill).pack(side=LEFT, ipadx=8, ipady=5, padx=5)
        om = ttk.OptionMenu(f,
                            self.width,  # bound variable
                            '1',  # initial selected value
                            '0',  # the following values populate the menu items
'1',
'2',
'3',
'4',
'5',
'6',
'7',
'8',
command=None)
om.pack(side=LEFT, ipadx=8, ipady=5, padx=5)
def choose_type(self, i):
        # Restore all buttons to the default (raised) state
        for b in self.bns: b['relief'] = RAISED
        # Give the current button a selected (sunken) look
        self.bns[i]['relief'] = SUNKEN
        # Set the shape type to draw
        self.item_type = i
    # Handle choosing the outline color
    def choose_outline(self):
        # Pop up a color-chooser dialog
        select_color = colorchooser.askcolor(parent=self.master,
                                             title="Choose the outline color", color=self.outline)
        if select_color is not None:
            self.outline = select_color[1]
    # Handle choosing the fill color
    def choose_fill(self):
        # Pop up a color-chooser dialog
        select_color = colorchooser.askcolor(parent=self.master,
                                             title="Choose the fill color", color=self.fill)
if select_color is not None:
self.fill = select_color[1]
else:
self.fill = None
def drag_handler(self, event):
        # Drawing a straight line
        if self.item_type == 0:
            # If the first point does not exist yet (self.firstx and self.firsty are both negative)
            if self.firstx < -1 and self.firsty < -1:
                self.firstx, self.firsty = event.x, event.y
            # Delete the previously drawn dashed preview
            if self.temp_item is not None:
                self.cv.delete(self.temp_item)
            # Redraw the dashed preview line
            self.temp_item = self.cv.create_line(self.firstx, self.firsty,
                event.x, event.y, dash=2)
        # Drawing a rectangle or an ellipse
if self.item_type == 1:
            # If the first point does not exist yet (self.firstx and self.firsty are both negative)
            if self.firstx < -1 and self.firsty < -1:
                self.firstx, self.firsty = event.x, event.y
            # Delete the previously drawn dashed preview
            if self.temp_item is not None:
                self.cv.delete(self.temp_item)
            leftx, lefty = min(self.firstx, event.x), min(self.firsty, event.y)
            rightx, righty = max(self.firstx, event.x), max(self.firsty, event.y)
            # Redraw the dashed selection box
            self.temp_item = self.cv.create_rectangle(leftx, lefty, rightx, righty,
                dash=2)
        if self.item_type == 2:
            # If the first point does not exist yet (self.firstx and self.firsty are both negative)
            if self.firstx < -1 and self.firsty < -1:
                self.firstx, self.firsty = event.x, event.y
            # Delete the previously drawn dashed preview
if self.temp_item is not None:
self.cv.delete(self.temp_item)
radius = math.sqrt((event.x-self.firstx)**2+(event.y-self.firsty)**2)
leftx, lefty = self.firstx-radius,self.firsty-radius
rightx, righty = event.x + radius,event.y+radius
self.temp_item = self.cv.create_oval(leftx, lefty, rightx, righty,
outline=self.outline, fill=self.fill, width=self.width.get(),dash=2)
if self.item_type == 3:
self.draw_polygon = True
            # If the first point does not exist yet (self.firstx and self.firsty are both negative)
            if self.firstx < -1 and self.firsty < -1:
                self.firstx, self.firsty = event.x, event.y
            # Delete the previously drawn dashed preview
            if self.temp_item is not None:
                self.cv.delete(self.temp_item)
            # Redraw the dashed preview line
            self.temp_item = self.cv.create_line(self.firstx, self.firsty,
                event.x, event.y, dash=2)
        if self.item_type == 4:
            # If the previous point exists (self.prevx and self.prevy are both positive)
if self.prevx > 0 and self.prevy > 0:
self.cv.create_line(self.prevx, self.prevy, event.x, event.y,
fill=self.outline, width=self.width.get())
self.prevx, self.prevy = event.x, event.y
def item_bind(self, t):
        # Bind a handler for right-button drag events
        self.cv.tag_bind(t, '<B3-Motion>', self.move)
        # Bind a handler for the right-button release event
self.cv.tag_bind(t, '<ButtonRelease-3>', self.move_end)
def release_handler(self, event):
        # Delete the temporary dashed preview item
        if self.temp_item is not None:
            # If not drawing a polygon
            if self.item_type != 3:
                self.cv.delete(self.temp_item)
            # When drawing a polygon, keep the dashed previews so they can be deleted later
else:
self.temp_items.append(self.temp_item)
self.temp_item = None
        # Drawing a straight line
        if self.item_type == 0:
            # If the first point exists (self.firstx and self.firsty are both positive)
            if self.firstx > 0 and self.firsty > 0:
                # Draw the actual line
                t = self.cv.create_line(self.firstx, self.firsty,
                    event.x, event.y, fill=self.outline, width=self.width.get())
                # Bind a left-click handler used to select the clicked item
                self.cv.tag_bind(t, '<Button-1>',
                    lambda event=event, t=t: self.choose_item_handler(event, t))
                self.item_bind(t)
        # Drawing a rectangle or an ellipse
        if self.item_type == 1 or self.item_type == 2:
            # If the first point exists (self.firstx and self.firsty are both positive)
if self.firstx > 0 and self.firsty > 0:
leftx, lefty = min(self.firstx, event.x), min(self.firsty, event.y)
rightx, righty = max(self.firstx, event.x), max(self.firsty, event.y)
if self.item_type == 1:
                    # Draw the actual rectangle
                    t = self.cv.create_rectangle(leftx, lefty, rightx, righty,
                        outline=self.outline, fill=self.fill, width=self.width.get())
                if self.item_type == 2:
                    # Draw the actual ellipse
                    t = self.cv.create_oval(leftx, lefty, rightx, righty,
                        outline=self.outline, fill=self.fill, width=self.width.get())
                # Bind a left-click handler used to select the clicked item
self.cv.tag_bind(t, '<Button-1>',
lambda event=event, t=t: self.choose_item_handler(event, t))
self.item_bind(t)
if self.item_type != 3:
self.prevx = self.prevy = -10
self.firstx = self.firsty = -10
        # If a polygon is currently being drawn
        elif (self.draw_polygon):
            # Append the first point of this segment to the list
self.points.append((self.firstx, self.firsty))
self.firstx, self.firsty = event.x, event.y
def double_handler(self, event):
        # Only handle the polygon-drawing case
        if self.item_type == 3:
            t = self.cv.create_polygon(*self.points,
                outline=self.outline, fill="" if self.fill is None else self.fill,
                width=self.width.get())
            # Bind a left-click handler used to select the clicked item
            self.cv.tag_bind(t, '<Button-1>',
                lambda event=event, t=t: self.choose_item_handler(event, t))
            self.item_bind(t)
            # Clear all stored points
            self.points.clear()
            # Reset self.firstx and self.firsty to -10 to stop drawing
            self.firstx = self.firsty = -10
            # Delete all temporary dashed previews
for it in self.temp_items: self.cv.delete(it)
self.temp_items.clear()
self.draw_polygon = False
    # Select the item passed in as parameter t
    def choose_item_handler(self, event, t):
        # Remember the currently selected item in self.choose_item
        self.choose_item = t
    # Move the selected item
    def move(self, event):
        # Moving is only allowed when an item is selected
        if self.choose_item is not None:
            # If the previous point exists (self.mv_prevx and self.mv_prevy are both positive)
            if self.mv_prevx > 0 and self.mv_prevy > 0:
                # Move the selected item
self.cv.move(self.choose_item, event.x - self.mv_prevx,
event.y - self.mv_prevy)
self.mv_prevx, self.mv_prevy = event.x, event.y
    # End the move operation
    def move_end(self, event):
        self.mv_prevx = self.mv_prevy = -10
    def delete_item(self, event):
        # If an item is selected, delete it
if self.choose_item is not None:
self.cv.delete(self.choose_item)
root = Tk()
root.title("绘图工具")
root.geometry('800x680')
app = App(root)
root.bind('<Delete>', app.delete_item)
root.mainloop() | 40.164063 | 110 | 0.51916 |
0d0f76f8520cc734ed25f6a5d0a551e54e8a9bb7 | 3,990 | py | Python | datasets/fbms.py | Schmiddo/d2conv3d | 9b330be56f0dfb9657a63e3fb3394ab36b35a67b | [
"MIT"
] | 16 | 2021-11-16T04:20:32.000Z | 2022-03-10T12:07:13.000Z | datasets/fbms.py | Schmiddo/d2conv3d | 9b330be56f0dfb9657a63e3fb3394ab36b35a67b | [
"MIT"
] | 1 | 2022-02-23T14:25:47.000Z | 2022-02-23T14:25:47.000Z | datasets/fbms.py | Schmiddo/d2conv3d | 9b330be56f0dfb9657a63e3fb3394ab36b35a67b | [
"MIT"
] | 1 | 2022-02-12T07:39:10.000Z | 2022-02-12T07:39:10.000Z | import glob
import os.path as osp
import random as rand
import numpy as np
from imageio import imread
from datasets.base_dataset import VideoSegmentationDataset, INFO, IMAGES_, TARGETS
from utils.registry import register
@register("dataset")
class FBMS(VideoSegmentationDataset):
def __init__(self, root, mode="train", resize_mode=None, resize_shape=None,
clip_size=8, max_tw=16):
if mode not in ("train", "val", "test"):
raise ValueError(f"'mode' should be either train, val, or test but is {mode}")
self.image_dir = osp.join(root, "Trainingset" if mode == "train" else "Testset")
self.mask_dir = osp.join(root, "inst", "train" if mode == "train" else "test")
self.num_frames = {}
super(FBMS, self).__init__(root, mode, resize_mode, resize_shape, clip_size, max_tw)
def read_target(self, sample):
masks = []
shape = sample["info"]["shape"]
for t in sample["targets"]:
if osp.exists(t):
mask = imread(t, pilmode="P")
else:
mask = np.zeros(shape).astype(np.uint8)
masks.append(mask)
return {"mask": np.stack(masks)[..., None]}
def _get_support_indices(self, index, video):
temporal_window = self.max_temporal_gap if self.is_train() else self.tw
start_index = max(0, index - temporal_window//2)
stop_index = min(self.num_frames[video], index + temporal_window//2)
indices = list(range(start_index, stop_index))
if self.is_train():
# TODO: sample without replacement?
indices = sorted(rand.choices(indices, k=self.tw))
else:
missing_frames = self.tw - len(indices)
if min(indices) == 0:
indices = indices + missing_frames * [start_index]
else:
indices = missing_frames * [stop_index-1] + indices
return indices
def _create_sample(self, video, img_list, mask_list, support_indices):
sample = {
IMAGES_: [img_list[s] for s in support_indices],
TARGETS: [mask_list[s] for s in support_indices],
INFO: {
"support_indices": support_indices,
"video": video,
"num_frames": self.num_frames[video],
"gt_frames": np.array([osp.exists(mask_list[s]) for s in support_indices]),
}
}
return sample
def create_sample_list(self):
self.videos = [osp.basename(v) for v in glob.glob(self.image_dir + "/*")]
if len(self.videos) == 0:
raise ValueError(f"Image directory {self.image_dir} is empty")
for video in self.videos:
img_list = sorted(glob.glob(osp.join(self.image_dir, video, "*.jpg")))
mask_list = [
osp.join(self.mask_dir, video, (osp.basename(img)[:-4] + ".png"))
for img in img_list
]
num_frames = len(img_list)
self.num_frames[video] = num_frames
for i, img in enumerate(img_list):
support_indices = self._get_support_indices(i, video)
sample = self._create_sample(video, img_list, mask_list, support_indices)
self.raw_samples.append(sample)
self.samples = self.raw_samples
@register("dataset")
class FBMSInfer(FBMS):
def __init__(self, root, mode="val", resize_mode=None, resize_shape=None,
clip_size=8):
super(FBMSInfer, self).__init__(root, mode, resize_mode, resize_shape, clip_size, clip_size)
def create_sample_list(self):
self.videos = [osp.basename(v) for v in glob.glob(self.image_dir + "/*")]
if len(self.videos) == 0:
raise ValueError(f"Image directory {self.image_dir} is empty")
for video in self.videos:
img_list = sorted(glob.glob(osp.join(self.image_dir, video, "*.jpg")))
mask_list = [
osp.join(self.mask_dir, video, (osp.basename(img)[:-4] + ".png"))
for img in img_list
]
num_frames = len(img_list)
self.num_frames[video] = num_frames
support_indices = list(range(num_frames))
sample = self._create_sample(video, img_list, mask_list, support_indices)
self.raw_samples.append(sample)
self.samples = self.raw_samples
| 36.272727 | 96 | 0.661404 |
5d03d9f9863b7a0f7e49e0c815959c550a9eadd8 | 7,784 | py | Python | biothings/hub/datarelease/releasenote.py | sirloon/biothings.api | 8a981fa2151e368d0ca76aaf226eb565d794d4fb | [
"Apache-2.0"
] | null | null | null | biothings/hub/datarelease/releasenote.py | sirloon/biothings.api | 8a981fa2151e368d0ca76aaf226eb565d794d4fb | [
"Apache-2.0"
] | null | null | null | biothings/hub/datarelease/releasenote.py | sirloon/biothings.api | 8a981fa2151e368d0ca76aaf226eb565d794d4fb | [
"Apache-2.0"
] | null | null | null | from dateutil.parser import parse as dtparse
import locale
locale.setlocale(locale.LC_ALL, '')
class ReleaseNoteTxt(object):
def __init__(self, changes):
self.changes = changes
#pprint(self.changes)
def save(self, filepath):
try:
import prettytable
except ImportError:
raise ImportError("Please install prettytable to use this rendered")
def format_number(n, sign=None):
s = ""
if sign:
if n > 0:
s = "+"
elif n < 0:
s = "-"
try:
n = abs(n)
strn = "%s%s" % (s,locale.format("%d", n, grouping=True))
except TypeError:
# something wrong with converting, maybe we don't even have a number to format...
strn = "N.A"
return strn
txt = ""
title = "Build version: '%s'" % self.changes["new"]["_version"]
txt += title + "\n"
txt += "".join(["="] * len(title)) + "\n"
dt = dtparse(self.changes["generated_on"])
txt += "Previous build version: '%s'\n" % self.changes["old"]["_version"]
txt += "Generated on: %s\n" % dt.strftime("%Y-%m-%d at %H:%M:%S")
txt += "\n"
table = prettytable.PrettyTable(["Updated datasource","prev. release","new release",
"prev. # of docs","new # of docs"])
table.align["Updated datasource"] = "l"
table.align["prev. release"] = "c"
table.align["new release"] = "c"
table.align["prev. # of docs"] = "r"
table.align["new # of docs"] = "r"
for src,info in sorted(self.changes["sources"]["added"].items(),key=lambda e: e[0]):
main_info = dict([(k,v) for k,v in info.items() if k.startswith("_")])
sub_infos = dict([(k,v) for k,v in info.items() if not k.startswith("_")])
if sub_infos:
for sub,sub_info in sub_infos.items():
table.add_row(["%s.%s" % (src,sub),"-",main_info["_version"],"-",format_number(sub_info["_count"])]) # only _count avail there
else:
main_count = main_info.get("_count") and format_number(main_info["_count"]) or ""
table.add_row([src,"-",main_info.get("_version",""),"-",main_count])
for src,info in sorted(self.changes["sources"]["deleted"].items(),key=lambda e: e[0]):
main_info = dict([(k,v) for k,v in info.items() if k.startswith("_")])
sub_infos = dict([(k,v) for k,v in info.items() if not k.startswith("_")])
if sub_infos:
for sub,sub_info in sub_infos.items():
table.add_row(["%s.%s" % (src,sub),main_info.get("_version",""),"-",format_number(sub_info["_count"]),"-"]) # only _count avail there
else:
main_count = main_info.get("_count") and format_number(main_info["_count"]) or ""
table.add_row([src,main_info.get("_version",""),"-",main_count,"-"])
for src,info in sorted(self.changes["sources"]["updated"].items(),key=lambda e: e[0]):
# extract information from main-source
old_main_info = dict([(k,v) for k,v in info["old"].items() if k.startswith("_")])
new_main_info = dict([(k,v) for k,v in info["new"].items() if k.startswith("_")])
old_main_count = old_main_info.get("_count") and format_number(old_main_info["_count"]) or None
new_main_count = new_main_info.get("_count") and format_number(new_main_info["_count"]) or None
if old_main_count is None:
assert new_main_count is None, "Sub-sources found for '%s', old and new count should " % src + \
"both be None. Info was: %s" % info
old_sub_infos = dict([(k,v) for k,v in info["old"].items() if not k.startswith("_")])
new_sub_infos = dict([(k,v) for k,v in info["new"].items() if not k.startswith("_")])
# old & new sub_infos should have the same structure (same existing keys)
# so we just use one of them to explore
if old_sub_infos:
assert new_sub_infos
for sub,sub_info in old_sub_infos.items():
table.add_row(["%s.%s" % (src,sub),old_main_info.get("_version",""),new_main_info.get("_version",""),
format_number(sub_info["_count"]),format_number(new_sub_infos[sub]["_count"])])
else:
assert not new_main_count is None, "No sub-sources found, old and new count should NOT " + \
"both be None. Info was: %s" % info
table.add_row([src,old_main_info.get("_version",""),new_main_info.get("_version",""),
old_main_count,new_main_count])
if table._rows:
txt += table.get_string()
txt += "\n"
else:
txt += "No datasource changed.\n"
total_count = self.changes["new"].get("_count")
if self.changes["sources"]["added"]:
txt += "New datasource(s): %s\n" % ", ".join(sorted(list(self.changes["sources"]["added"])))
if self.changes["sources"]["deleted"]:
txt += "Deleted datasource(s): %s\n" % ", ".join(sorted(list(self.changes["sources"]["deleted"])))
if self.changes["sources"]:
txt += "\n"
table = prettytable.PrettyTable(["Updated stats.","previous","new"])
table.align["Updated stats."] = "l"
table.align["previous"] = "r"
table.align["new"] = "r"
for stat_name,stat in sorted(self.changes["stats"]["added"].items(),key=lambda e: e[0]):
table.add_row([stat_name,"-",format_number(stat["_count"])])
for stat_name,stat in sorted(self.changes["stats"]["deleted"].items(),key=lambda e: e[0]):
table.add_row([stat_name,format_number(stat["_count"]),"-"])
for stat_name,stat in sorted(self.changes["stats"]["updated"].items(),key=lambda e: e[0]):
table.add_row([stat_name,format_number(stat["old"]["_count"]),format_number(stat["new"]["_count"])])
if table._rows:
txt += table.get_string()
txt += "\n\n"
if self.changes["new"]["_fields"]:
new_fields = sorted(self.changes["new"]["_fields"].get("add",[]))
deleted_fields = self.changes["new"]["_fields"].get("remove",[])
updated_fields = self.changes["new"]["_fields"].get("replace",[])
if new_fields:
txt += "New field(s): %s\n" % ", ".join(new_fields)
if deleted_fields:
txt += "Deleted field(s): %s\n" % ", ".join(deleted_fields)
if updated_fields:
txt += "Updated field(s): %s\n" % ", ".join(updated_fields)
txt += "\n"
if not total_count is None:
txt += "Overall, %s documents in this release\n" % (format_number(total_count))
if self.changes["new"]["_summary"]:
sumups = []
sumups.append("%s document(s) added" % format_number(self.changes["new"]["_summary"].get("add",0)))
sumups.append("%s document(s) deleted" % format_number(self.changes["new"]["_summary"].get("delete",0)))
sumups.append("%s document(s) updated" % format_number(self.changes["new"]["_summary"].get("update",0)))
txt += ", ".join(sumups) + "\n"
else:
txt += "No information available for added/deleted/updated documents\n"
if self.changes.get("note"):
txt += "\n"
txt += "Note: %s\n" % self.changes["note"]
with open(filepath,"w") as fout:
fout.write(txt)
return txt
| 50.875817 | 153 | 0.543165 |
bbbb6af384bca3e2a6408bc2da9d0fccef7de0c8 | 1,943 | py | Python | main.py | StephanGuingor/KMeans-ST | fa597b77ad145f4f5144b4f7b2d3ed8bd5a10364 | [
"MIT"
] | null | null | null | main.py | StephanGuingor/KMeans-ST | fa597b77ad145f4f5144b4f7b2d3ed8bd5a10364 | [
"MIT"
] | 2 | 2020-09-17T18:46:59.000Z | 2020-09-17T20:39:56.000Z | main.py | StephanGuingor/KMeans-ST | fa597b77ad145f4f5144b4f7b2d3ed8bd5a10364 | [
"MIT"
] | 3 | 2020-09-17T18:04:07.000Z | 2020-09-17T18:16:15.000Z | import numpy as np
import random
import matplotlib.pyplot as plt
from cercanos import cercanos
from Centros import centros
from sklearn.datasets import make_blobs  # samples_generator was removed in newer scikit-learn releases
from kmeans import *
from pprint import pprint
import pandas as pd
if __name__ == "__main__":
X, y_true = make_blobs(n_samples=300, centers=4,
cluster_std=0.60, random_state=0)
#
#
# # KMeans Via Us
# # cent = kmeans(X,k=4)
# k = 4
# km = KMeansTec(n_clusters=k)
# km.fit(X)
# y_kmeans = km.predict(X)
# # print(y_kmeans)
# plt.subplot(121)
# plt.grid(True)
# plt.title("Nuestro")
# plt.scatter(X[:, 0], X[:, 1], s=50, c=y_kmeans,cmap='coolwarm');
# plt.scatter([c[0] for c in km.centers ], [c[1] for c in km.centers ], c='black', s=200, alpha=0.5)
#
#
# # Sklearn KMeans
# from sklearn.cluster import KMeans
# kmeans = KMeans(n_clusters=k)
# kmeans.fit(X)
# y_kmeans = kmeans.predict(X)
# # print(y_kmeans)
# plt.subplot(122)
# plt.title("Sklearn")
# plt.grid(True)
# # Plot Clusters
# plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=50, cmap='viridis')
# centers = kmeans.cluster_centers_
# plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)
#
# pprint(f"Sklearn -> {centers} |\n US -> {km.centers}")
#
# plt.show()
#
#
df = pd.read_csv("data/iris.data")
X = df.iloc[:,:-1].to_numpy()
k = 3
km = KMeansTec(n_clusters=k)
km.fit(X)
y_kmeans = km.predict(X)
# print(y_kmeans)
# plt.subplot(121)
plt.grid(True)
plt.title("Iris Data Set")
plt.scatter(X[:, 0], X[:, 1], s=50, c=y_kmeans,cmap='coolwarm');
plt.scatter([c[0] for c in km.centers ], [c[1] for c in km.centers ], c='black', s=200, alpha=0.5)
plt.show()
# We could add metrics to check the data
| 29.892308 | 105 | 0.569738 |
05c585d7d6e3c47f8eac8f2d2bfbdd168d4d5e75 | 7,659 | py | Python | cinder/hacking/checks.py | alexpilotti/cinder-ci-fixes | c0ed2ab8cc6b1197e426cd6c58c3b582624d1cfd | [
"Apache-2.0"
] | null | null | null | cinder/hacking/checks.py | alexpilotti/cinder-ci-fixes | c0ed2ab8cc6b1197e426cd6c58c3b582624d1cfd | [
"Apache-2.0"
] | null | null | null | cinder/hacking/checks.py | alexpilotti/cinder-ci-fixes | c0ed2ab8cc6b1197e426cd6c58c3b582624d1cfd | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
"""
Guidelines for writing new hacking checks
- Use only for Cinder specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to
cinder/tests/test_hacking.py
"""
# NOTE(thangp): Ignore N323 pep8 error caused by importing cinder objects
UNDERSCORE_IMPORT_FILES = ['./cinder/objects/__init__.py']
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
"\(\s*_\(\s*('|\")")
string_translation = re.compile(r"(.)*_\(\s*('|\")")
vi_header_re = re.compile(r"^#\s+vim?:.+")
underscore_import_check = re.compile(r"(.)*i18n\s+import\s+_(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
no_audit_log = re.compile(r"(.)*LOG\.audit(.)*")
# NOTE(jsbryant): When other oslo libraries switch over non-namespaced
# imports, we will need to add them to the regex below.
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](concurrency|db"
"|config|utils|serialization|log)")
log_translation_LI = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LE = re.compile(
r"(.)*LOG\.(exception|error)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
def no_vi_headers(physical_line, line_number, lines):
"""Check for vi editor configuration in source files.
By default vi modelines can only appear in the first or
last 5 lines of a source file.
N314
"""
# NOTE(gilliard): line_number is 1-indexed
if line_number <= 5 or line_number > len(lines) - 5:
if vi_header_re.match(physical_line):
return 0, "N314: Don't put vi configuration in source files"
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
N319
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "N319 Don't translate debug level logs")
def no_mutable_default_args(logical_line):
msg = "N322: Method's default argument shouldn't be mutable!"
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
if mutable_default_args.match(logical_line):
yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif(translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield(0, "N323: Found use of _() without explicit import of _ !")
def check_no_log_audit(logical_line):
"""Ensure that we are not using LOG.audit messages
Plans are in place going forward as discussed in the following
spec (https://review.openstack.org/#/c/91446/) to take out
LOG.audit messages. Given that audit was a concept invented
for OpenStack we can enforce not using it.
"""
if no_audit_log.match(logical_line):
yield(0, "N324: Found LOG.audit. Use LOG.info instead.")
def check_assert_called_once(logical_line, filename):
msg = ("N327: assert_called_once is a no-op. please use assert_called_"
"once_with to test with explicit parameters or an assertEqual with"
" call_count.")
if 'cinder/tests/' in filename:
pos = logical_line.find('.assert_called_once(')
if pos != -1:
yield (pos, msg)
def validate_log_translations(logical_line, filename):
# TODO(smcginnis): The following is temporary as a series
# of patches are done to address these issues. It should be
# removed completely when bug 1433216 is closed.
ignore_dirs = [
"cinder/backup",
"cinder/brick",
"cinder/common",
"cinder/db",
"cinder/openstack",
"cinder/scheduler",
"cinder/volume",
"cinder/zonemanager"]
for directory in ignore_dirs:
if directory in filename:
return
# Translations are not required in the test directory.
# This will not catch all instances of violations, just direct
# misuse of the form LOG.info('Message').
if "cinder/tests" in filename:
return
msg = "N328: LOG.info messages require translations `_LI()`!"
if log_translation_LI.match(logical_line):
yield (0, msg)
msg = ("N329: LOG.exception and LOG.error messages require "
"translations `_LE()`!")
if log_translation_LE.match(logical_line):
yield (0, msg)
msg = "N330: LOG.warning messages require translations `_LW()`!"
if log_translation_LW.match(logical_line):
yield (0, msg)
def check_oslo_namespace_imports(logical_line):
if re.match(oslo_namespace_imports, logical_line):
msg = ("N333: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
def check_no_contextlib_nested(logical_line):
msg = ("N339: contextlib.nested is deprecated. With Python 2.7 and later "
"the with-statement supports multiple nested objects. See https://"
"docs.python.org/2/library/contextlib.html#contextlib.nested "
"for more information.")
if "with contextlib.nested" in logical_line:
yield(0, msg)
def check_datetime_now(logical_line, noqa):
if noqa:
return
msg = ("C301: Found datetime.now(). "
"Please use timeutils.utcnow() from oslo_utils.")
if 'datetime.now' in logical_line:
yield(0, msg)
def factory(register):
register(no_vi_headers)
register(no_translate_debug_logs)
register(no_mutable_default_args)
register(check_explicit_underscore_import)
register(check_no_log_audit)
register(check_assert_called_once)
register(check_oslo_namespace_imports)
register(check_no_contextlib_nested)
register(check_datetime_now)
register(validate_log_translations)
| 36.127358 | 78 | 0.685468 |
edecc44b113ec7c2667edd65b605a3a327558ff1 | 2,082 | py | Python | source/tests/test_data_type_mutations.py | pankajagrawal16/aws-control-tower-customizations | e4752bf19a1c8f0a597195982d63a1a2c2dd799a | [
"Apache-2.0"
] | 1 | 2020-02-11T16:34:09.000Z | 2020-02-11T16:34:09.000Z | source/tests/test_data_type_mutations.py | pankajagrawal16/aws-control-tower-customizations | e4752bf19a1c8f0a597195982d63a1a2c2dd799a | [
"Apache-2.0"
] | null | null | null | source/tests/test_data_type_mutations.py | pankajagrawal16/aws-control-tower-customizations | e4752bf19a1c8f0a597195982d63a1a2c2dd799a | [
"Apache-2.0"
] | null | null | null | ######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
from lib.logger import Logger
from trigger_stackset_sm import DeployStackSetStateMachine
log_level = 'info'
logger = Logger(loglevel=log_level)
wait_time = 30
manifest_file_path = './manifest.yaml'
sm_arn_stackset = 'arn::::::stackset'
staging_bucket = 'bucket_name'
execution_mode = 'parallel'
dss = DeployStackSetStateMachine(logger, wait_time, manifest_file_path, sm_arn_stackset, staging_bucket, execution_mode)
def test_list_item_conversion():
list_of_numbers = [1234, 5678]
list_of_strings = dss._convert_list_values_to_string(list_of_numbers)
for string in list_of_strings:
assert type(string) is str | 61.235294 | 134 | 0.446686 |
b61fea75c50705df58c217578b256772b4157aa4 | 9,947 | py | Python | venv/lib/python3.8/site-packages/pip/_vendor/urllib3/util/timeout.py | liuzhongning/python_learn | 47d471e40e6c25271faab549dfa235849264c3b4 | [
"MIT"
] | 178 | 2017-07-18T18:58:36.000Z | 2022-03-31T03:12:52.000Z | env/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 301 | 2020-10-03T10:46:31.000Z | 2022-03-27T23:46:23.000Z | env/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 86 | 2022-01-04T06:32:30.000Z | 2022-03-30T13:05:51.000Z | from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
# Use time.monotonic if available.
current_time = getattr(time, "monotonic", time.time)
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
response = http.request('GET', 'http://example.com/, timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time (in seconds) to wait for a connection
attempt to a server to succeed. Omitting the parameter will default the
connect timeout to the system default, probably `the global default
timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time (in seconds) to wait between consecutive
read operations for a response from the server. Omitting the parameter
will default the read timeout to the system default, probably `the
global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, "connect")
self._read = self._validate_timeout(read, "read")
self.total = self._validate_timeout(total, "total")
self._start_connect = None
def __repr__(self):
return "%s(connect=%r, read=%r, total=%r)" % (
type(self).__name__,
self._connect,
self._read,
self.total,
)
# __str__ provided for backwards compatibility
__str__ = __repr__
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If it is a numeric value less than or equal to
zero, or the type is not an integer, float, or None.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
if isinstance(value, bool):
raise ValueError(
"Timeout cannot be a boolean value. It must "
"be an int, float or None."
)
try:
float(value)
except (TypeError, ValueError):
raise ValueError(
"Timeout value %s was %s, but it must be an "
"int, float or None." % (name, value)
)
try:
if value <= 0:
raise ValueError(
"Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than or equal to 0." % (name, value)
)
except TypeError:
# Python 3
raise ValueError(
"Timeout value %s was %s, but it must be an "
"int, float or None." % (name, value)
)
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read, total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time in seconds.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError(
"Can't get connect duration for timer that has not started."
)
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (
self.total is not None
and self.total is not self.DEFAULT_TIMEOUT
and self._read is not None
and self._read is not self.DEFAULT_TIMEOUT
):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(), self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
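    # Illustrative sketch (not part of urllib3) of how the clamping above combines
    # connect, read and total; the numbers are made up for the example.
    #
    #   t = Timeout(connect=2.0, read=7.0, total=5.0)
    #   t.connect_timeout        # min(2.0, 5.0) -> 2.0
    #   t.start_connect()        # start timing the connect() attempt
    #   # ...socket connects after roughly 1 second...
    #   t.read_timeout           # max(0, min(5.0 - elapsed, 7.0)) -> about 4.0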
| 37.965649 | 84 | 0.640193 |
42744dc1384a54c5a3861caccf53bf77081b9c59 | 4,890 | py | Python | tests/test_similarity_smc.py | private-record-linkage/anonlink | ac3919b38b97ee1ead397dfa4050e533b1a80681 | [
"Apache-2.0"
] | 36 | 2019-04-30T21:01:13.000Z | 2022-02-23T05:28:19.000Z | tests/test_similarity_smc.py | private-record-linkage/anonlink | ac3919b38b97ee1ead397dfa4050e533b1a80681 | [
"Apache-2.0"
] | 328 | 2019-04-15T05:19:36.000Z | 2022-03-09T15:10:14.000Z | tests/test_similarity_smc.py | private-record-linkage/anonlink | ac3919b38b97ee1ead397dfa4050e533b1a80681 | [
"Apache-2.0"
] | 9 | 2019-04-15T01:51:20.000Z | 2021-04-19T05:52:50.000Z | import itertools
import random
import pytest
from bitarray import bitarray
from anonlink.similarities._smc import _smc_sim
from anonlink.similarities import (hamming_similarity,
simple_matching_coefficient)
SIM_FUNS = [hamming_similarity, simple_matching_coefficient]
def test_smc_sim_k():
# This tests an internal function. It may need to change if the
# implementation of `simple_matching_coefficient` changes.
assert _smc_sim(bitarray('0'), bitarray('0')) == 1
assert _smc_sim(bitarray('0'), bitarray('1')) == 0
assert _smc_sim(bitarray('1'), bitarray('0')) == 0
assert _smc_sim(bitarray('1'), bitarray('1')) == 1
assert _smc_sim(bitarray('00'), bitarray('00')) == 1
assert _smc_sim(bitarray('00'), bitarray('01')) == .5
assert _smc_sim(bitarray('00'), bitarray('10')) == .5
assert _smc_sim(bitarray('00'), bitarray('11')) == 0
assert _smc_sim(bitarray('01'), bitarray('00')) == .5
assert _smc_sim(bitarray('01'), bitarray('01')) == 1
assert _smc_sim(bitarray('01'), bitarray('10')) == 0
assert _smc_sim(bitarray('01'), bitarray('11')) == .5
assert _smc_sim(bitarray('10'), bitarray('00')) == .5
assert _smc_sim(bitarray('10'), bitarray('01')) == 0
assert _smc_sim(bitarray('10'), bitarray('10')) == 1
assert _smc_sim(bitarray('10'), bitarray('11')) == .5
assert _smc_sim(bitarray('11'), bitarray('00')) == 0
assert _smc_sim(bitarray('11'), bitarray('01')) == .5
assert _smc_sim(bitarray('11'), bitarray('10')) == .5
assert _smc_sim(bitarray('11'), bitarray('11')) == 1
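# Illustrative check spelling out the definition exercised above: the simple
# matching coefficient is the fraction of bit positions on which the two arrays
# agree, shown here on one four-bit example.
def test_smc_sim_fraction_of_matching_positions():
    # positions 0 and 3 match, positions 1 and 2 differ -> similarity 2/4
    assert _smc_sim(bitarray('1100'), bitarray('1010')) == 0.5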
def _sanity_check_candidates(sims, indices, candidates):
assert len(indices) == 2
assert all(len(i) == len(sims) for i in indices)
assert len(candidates) == len(sims)
assert not candidates or len(next(iter(candidates))) == 2
@pytest.fixture(scope='module',
params=itertools.product([0, 80], [64]))
def datasets(request):
recs_per_dataset, length = request.param
result = tuple([bitarray(random.choices((False, True), k=length))
for _ in range(recs_per_dataset)]
for _ in range(2))
assert len(result) == 2
assert all(len(dataset) == recs_per_dataset for dataset in result)
assert all(len(record) == length for dataset in result
for record in dataset)
return result
@pytest.mark.parametrize('threshold', [1.0, 0.6, 0.0])
@pytest.mark.parametrize('f', SIM_FUNS)
class TestHammingSimilarity:
def test_no_k(self, datasets, threshold, f):
sims, indices = f(datasets, threshold, k=None)
candidates = dict(zip(zip(indices[0], indices[1]), sims))
_sanity_check_candidates(sims, indices, candidates)
for (i0, record0), (i1, record1) \
in itertools.product(enumerate(datasets[0]),
enumerate(datasets[1])):
sim = _smc_sim(record0, record1)
if sim >= threshold:
assert (i0, i1) in candidates
assert candidates[i0, i1] == sim
else:
assert (i0, i1) not in candidates
@pytest.mark.parametrize('k', [0, 20, 80])
def test_k(self, datasets, threshold, k, f):
sims, indices = f(datasets, threshold, k=k)
candidates = dict(zip(zip(indices[0], indices[1]), sims))
_sanity_check_candidates(sims, indices, candidates)
# Make sure we return at most k
for i, _ in enumerate(datasets[0]):
assert sum(indices[0] == i for indices in candidates) <= k
for i, _ in enumerate(datasets[1]):
assert sum(indices[1] == i for indices in candidates) <= k
for (i0, record0), (i1, record1) \
in itertools.product(*map(enumerate, datasets)):
sim = _smc_sim(record0, record1)
if sim >= threshold:
if (i0, i1) not in candidates:
assert (not k
or sim <= min(val
for index, val in candidates.items()
if index[0] == i0)
or sim <= min(val
for index, val in candidates.items()
if index[1] == i1))
else:
assert candidates[i0, i1] == sim
else:
assert (i0, i1) not in candidates
@pytest.mark.parametrize('size', [0, 1, 3, 5])
@pytest.mark.parametrize('threshold', [0., .5, 1.])
@pytest.mark.parametrize('k', [None, 0, 10])
@pytest.mark.parametrize('f', SIM_FUNS)
def test_unsupported_size(size, threshold, k, f):
datasets = [['01001101'] for _ in range(size)]
with pytest.raises(NotImplementedError):
f(datasets, threshold, k=k)
| 39.756098 | 78 | 0.579346 |
2f975d3d870a8284c9bac08a5b41dd92ffabfebf | 1,743 | py | Python | _scripts/postPumlsToServer.py | carlosraphael/java-design-patterns | e425c2ef2f721600e14b59d67eb5ef27759113f0 | [
"MIT"
] | 1 | 2020-03-29T07:29:10.000Z | 2020-03-29T07:29:10.000Z | _scripts/postPumlsToServer.py | trileminh94/java-design-patterns | e425c2ef2f721600e14b59d67eb5ef27759113f0 | [
"MIT"
] | null | null | null | _scripts/postPumlsToServer.py | trileminh94/java-design-patterns | e425c2ef2f721600e14b59d67eb5ef27759113f0 | [
"MIT"
] | 1 | 2020-05-01T10:11:18.000Z | 2020-05-01T10:11:18.000Z | import requests, glob, re, os
# taken from here: http://stackoverflow.com/a/13641746
def replace(file, pattern, subst):
# Read contents from file as a single string
file_handle = open(file, 'r')
file_string = file_handle.read()
file_handle.close()
# Use RE package to allow for replacement (also allowing for (multiline) REGEX)
file_string = (re.sub(pattern, subst, file_string))
# Write contents to file.
# Using mode 'w' truncates the file.
file_handle = open(file, 'w')
file_handle.write(file_string)
file_handle.close()
# list of all puml files
fileList = glob.glob('*/etc/*.puml')
for puml in fileList:
pathSplit = puml.split("/")
# parent folder
parent = pathSplit[0]
# individual artifact/project name
artifact = pathSplit[2].replace(".urm.puml", "")
print "parent: " + parent + "; artifact: " + artifact
# do a POST to the official plantuml hosting site with a little trick "!includeurl" and raw github content
data = {
'text': "!includeurl https://raw.githubusercontent.com/iluwatar/java-design-patterns/master/" + puml
}
r = requests.post('http://plantuml.com/plantuml/uml', data=data)
pumlId = r.url.replace("http://plantuml.com/plantuml/uml/", "")
# the only thing needed to get a png/svg/ascii from the server back
print "Puml Server ID: " + pumlId
# add the id so jekyll/liquid can use it
if (parent == artifact):
replace("./" + parent + "/README.md", "categories:", "pumlid: {}\\ncategories:".format(pumlId))
else:
print "I dont want to program this, just add the following lines to the README.md file that corresponds to this puml file '" + puml + "'\npumlid: {}".format(pumlId)
| 38.733333 | 172 | 0.662651 |
e31aed23b6749c406031290c0080e2b2e64f86cc | 328 | py | Python | app/email.py | NzauM/Blogs | 0fed162b34bda0cfb8f9ed01987d6d618bc9fe7f | [
"MIT"
] | null | null | null | app/email.py | NzauM/Blogs | 0fed162b34bda0cfb8f9ed01987d6d618bc9fe7f | [
"MIT"
] | null | null | null | app/email.py | NzauM/Blogs | 0fed162b34bda0cfb8f9ed01987d6d618bc9fe7f | [
"MIT"
] | null | null | null | from flask_mail import Message
from flask import render_template
from . import mail
def mail_message(subject,template,to,**kwargs):
sender_email = "[email protected]"
email = Message(subject, sender=sender_email, recipients=[to])
email.html = render_template(template + ".html",**kwargs)
mail.send(email) | 29.818182 | 66 | 0.746951 |
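# Usage sketch (illustrative only; the template name and recipient are assumptions):
#
#   mail_message("Welcome to Blogs", "email/welcome", "[email protected]", user=user)
#
# renders templates/email/welcome.html with `user` in its context and sends it
# through the configured Flask-Mail instance.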
87a1d34b3c942d2051d31ce52c4beec53143092e | 712 | py | Python | ansible/roles/db/molecule/default/tests/test_default.py | kvaga/infra | d65fdbcf9bff2ebb53f4172c1f5437898e45807f | [
"MIT"
] | 1 | 2020-06-15T16:59:53.000Z | 2020-06-15T16:59:53.000Z | ansible/roles/db/molecule/default/tests/test_default.py | kvaga/infra | d65fdbcf9bff2ebb53f4172c1f5437898e45807f | [
"MIT"
] | 2 | 2019-06-13T13:01:30.000Z | 2019-06-30T18:59:19.000Z | ansible/roles/db/molecule/default/tests/test_default.py | kvaga/infra | d65fdbcf9bff2ebb53f4172c1f5437898e45807f | [
"MIT"
] | 1 | 2020-06-09T11:40:54.000Z | 2020-06-09T11:40:54.000Z | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# check if MongoDB is using right port
def test_mongo_port(host):
socket = host.socket("tcp://0.0.0.0:27017")
assert socket.is_listening
# check if MongoDB is enabled and running
def test_mongo_running_and_enabled(host):
mongo = host.service("mongod")
assert mongo.is_running
assert mongo.is_enabled
# check if configuration file contains the required line
def test_config_file(host):
config_file = host.file('/etc/mongod.conf')
assert config_file.contains('bindIp: 0.0.0.0')
assert config_file.is_file
| 29.666667 | 63 | 0.759831 |
c300dcd52798b88e21b853baf4665be6b9d9f2fa | 5,314 | py | Python | python/lib/Lib/site-packages/django/contrib/gis/db/models/sql/query.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | django/contrib/gis/db/models/sql/query.py | mradziej/django | 5d38965743a369981c9a738a298f467f854a2919 | [
"BSD-3-Clause"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | django/contrib/gis/db/models/sql/query.py | mradziej/django | 5d38965743a369981c9a738a298f467f854a2919 | [
"BSD-3-Clause"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | from django.db import connections
from django.db.models.query import sql
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import aggregates as gis_aggregates
from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField
from django.contrib.gis.db.models.sql.where import GeoWhereNode
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
ALL_TERMS = dict([(x, None) for x in (
'bbcontains', 'bboverlaps', 'contained', 'contains',
'contains_properly', 'coveredby', 'covers', 'crosses', 'disjoint',
'distance_gt', 'distance_gte', 'distance_lt', 'distance_lte',
'dwithin', 'equals', 'exact',
'intersects', 'overlaps', 'relate', 'same_as', 'touches', 'within',
'left', 'right', 'overlaps_left', 'overlaps_right',
'overlaps_above', 'overlaps_below',
'strictly_above', 'strictly_below'
)])
ALL_TERMS.update(sql.constants.QUERY_TERMS)
class GeoQuery(sql.Query):
"""
A single spatial SQL query.
"""
# Overridding the valid query terms.
query_terms = ALL_TERMS
aggregates_module = gis_aggregates
compiler = 'GeoSQLCompiler'
#### Methods overridden from the base Query class ####
def __init__(self, model, where=GeoWhereNode):
super(GeoQuery, self).__init__(model, where)
# The following attributes are customized for the GeoQuerySet.
# The GeoWhereNode and SpatialBackend classes contain backend-specific
# routines and functions.
self.custom_select = {}
self.transformed_srid = None
self.extra_select_fields = {}
def clone(self, *args, **kwargs):
obj = super(GeoQuery, self).clone(*args, **kwargs)
# Customized selection dictionary and transformed srid flag have
# to also be added to obj.
obj.custom_select = self.custom_select.copy()
obj.transformed_srid = self.transformed_srid
obj.extra_select_fields = self.extra_select_fields.copy()
return obj
def convert_values(self, value, field, connection):
"""
Using the same routines that Oracle does we can convert our
extra selection objects into Geometry and Distance objects.
TODO: Make converted objects 'lazy' for less overhead.
"""
if connection.ops.oracle:
# Running through Oracle's first.
value = super(GeoQuery, self).convert_values(value, field or GeomField(), connection)
if value is None:
# Output from spatial function is NULL (e.g., called
# function on a geometry field with NULL value).
pass
elif isinstance(field, DistanceField):
# Using the field's distance attribute, can instantiate
# `Distance` with the right context.
value = Distance(**{field.distance_att : value})
elif isinstance(field, AreaField):
value = Area(**{field.area_att : value})
elif isinstance(field, (GeomField, GeometryField)) and value:
value = Geometry(value)
return value
def get_aggregation(self, using):
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
connection = connections[using]
for alias, aggregate in self.aggregate_select.items():
if isinstance(aggregate, gis_aggregates.GeoAggregate):
if not getattr(aggregate, 'is_extent', False) or connection.ops.oracle:
self.extra_select_fields[alias] = GeomField()
return super(GeoQuery, self).get_aggregation(using)
def resolve_aggregate(self, value, aggregate, connection):
"""
Overridden from GeoQuery's normalize to handle the conversion of
GeoAggregate objects.
"""
if isinstance(aggregate, self.aggregates_module.GeoAggregate):
if aggregate.is_extent:
if aggregate.is_extent == '3D':
return connection.ops.convert_extent3d(value)
else:
return connection.ops.convert_extent(value)
else:
return connection.ops.convert_geom(value, aggregate.source)
else:
return super(GeoQuery, self).resolve_aggregate(value, aggregate, connection)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered; or specified via the
`field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuery's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
# Incrementing until the first geographic field is found.
for fld in self.model._meta.fields:
if isinstance(fld, GeometryField): return fld
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GeoWhereNode._check_geo_field(self.model._meta, field_name)
| 44.283333 | 97 | 0.65111 |
0ef1e0b23c2ddadf2ebae89fdac03126d2b06aab | 362 | py | Python | actions/chef_ohai.py | StackStorm-Exchange/stackstorm-chef | 3b16fddb07b78a8a37ccdbb0f051c660e7b75bd9 | [
"Apache-2.0"
] | 3 | 2019-04-28T04:50:18.000Z | 2022-03-06T09:04:20.000Z | actions/chef_ohai.py | StackStorm-Exchange/stackstorm-chef | 3b16fddb07b78a8a37ccdbb0f051c660e7b75bd9 | [
"Apache-2.0"
] | null | null | null | actions/chef_ohai.py | StackStorm-Exchange/stackstorm-chef | 3b16fddb07b78a8a37ccdbb0f051c660e7b75bd9 | [
"Apache-2.0"
] | 1 | 2021-01-28T17:43:14.000Z | 2021-01-28T17:43:14.000Z | #!/usr/bin/env python3
import sys
from lib import shellhelpers as shell
def _locate_ohai():
return 'ohai'
if __name__ == '__main__':
# this is a workaround since we use run-remote and it
# passes missing command as None in argv.
command = ([_locate_ohai()] + [i for i in sys.argv[1:] if i != 'None'])
sys.exit(shell.shell_out(command))
| 21.294118 | 75 | 0.668508 |
4209a2a10115c32f50ba0c0476388f918c0c2ff2 | 26,595 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEGPS_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEGPS_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEGPS_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FCoEGPS(Base):
__slots__ = ()
_SDM_NAME = 'fCoEGPS'
_SDM_ATT_MAP = {
'FcoeHeaderVersion': 'fCoEGPS.header.fcoeHeader.version-1',
'FcoeHeaderReserved': 'fCoEGPS.header.fcoeHeader.reserved-2',
'FcoeHeaderESOF': 'fCoEGPS.header.fcoeHeader.eSOF-3',
'DeviceDataFramesDeviceDataInfo': 'fCoEGPS.header.fcHeader.rCTL.deviceDataFrames.deviceDataInfo-4',
'RCTLReserved': 'fCoEGPS.header.fcHeader.rCTL.reserved-5',
'ExtendedLinkServicesInfo': 'fCoEGPS.header.fcHeader.rCTL.extendedLinkServices.info-6',
'Fc4LinkDataInfo': 'fCoEGPS.header.fcHeader.rCTL.fc4LinkData.info-7',
'VideoDataInfo': 'fCoEGPS.header.fcHeader.rCTL.videoData.info-8',
'ExtendedHeaderInfo': 'fCoEGPS.header.fcHeader.rCTL.extendedHeader.info-9',
'BasicLinkServicesInfo': 'fCoEGPS.header.fcHeader.rCTL.basicLinkServices.info-10',
'LinkControlFramesInfo': 'fCoEGPS.header.fcHeader.rCTL.linkControlFrames.info-11',
'ExtendedRoutingInfo': 'fCoEGPS.header.fcHeader.rCTL.extendedRouting.info-12',
'FcHeaderDstId': 'fCoEGPS.header.fcHeader.dstId-13',
'FcHeaderCsCTLPriority': 'fCoEGPS.header.fcHeader.csCTLPriority-14',
'FcHeaderSrcId': 'fCoEGPS.header.fcHeader.srcId-15',
'FcHeaderType': 'fCoEGPS.header.fcHeader.type-16',
'FCTLCustom': 'fCoEGPS.header.fcHeader.fCTL.custom-17',
'BuildFCTLExchangeContext': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.exchangeContext-18',
'BuildFCTLSequenceContext': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.sequenceContext-19',
'BuildFCTLFirstSequence': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.firstSequence-20',
'BuildFCTLLastSequence': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.lastSequence-21',
'BuildFCTLEndSequence': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.endSequence-22',
'BuildFCTLEndConnection': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.endConnection-23',
'BuildFCTLCsCTLPriority': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.csCTLPriority-24',
'BuildFCTLSequenceInitiative': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.sequenceInitiative-25',
'BuildFCTLFcXIDReassigned': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.fcXIDReassigned-26',
'BuildFCTLFcInvalidateXID': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.fcInvalidateXID-27',
'BuildFCTLAckForm': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.ackForm-28',
'BuildFCTLFcDataCompression': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.fcDataCompression-29',
'BuildFCTLFcDataEncryption': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.fcDataEncryption-30',
'BuildFCTLRetransmittedSequence': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.retransmittedSequence-31',
'BuildFCTLUnidirectionalTransmit': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.unidirectionalTransmit-32',
'BuildFCTLContinueSeqCondition': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.continueSeqCondition-33',
'BuildFCTLAbortSeqCondition': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.abortSeqCondition-34',
'BuildFCTLRelativeOffsetPresent': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.relativeOffsetPresent-35',
'BuildFCTLExchangeReassembly': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.exchangeReassembly-36',
'BuildFCTLFillBytes': 'fCoEGPS.header.fcHeader.fCTL.buildFCTL.fillBytes-37',
'FcHeaderSeqID': 'fCoEGPS.header.fcHeader.seqID-38',
'FcHeaderDfCTL': 'fCoEGPS.header.fcHeader.dfCTL-39',
'FcHeaderSeqCNT': 'fCoEGPS.header.fcHeader.seqCNT-40',
'FcHeaderOxID': 'fCoEGPS.header.fcHeader.oxID-41',
'FcHeaderRxID': 'fCoEGPS.header.fcHeader.rxID-42',
'FcHeaderParameter': 'fCoEGPS.header.fcHeader.parameter-43',
'FcCTRevision': 'fCoEGPS.header.fcCT.revision-44',
'FcCTInId': 'fCoEGPS.header.fcCT.inId-45',
'FcCTGsType': 'fCoEGPS.header.fcCT.gsType-46',
'FcCTGsSubtype': 'fCoEGPS.header.fcCT.gsSubtype-47',
'FcCTOptions': 'fCoEGPS.header.fcCT.options-48',
'FcCTReserved': 'fCoEGPS.header.fcCT.reserved-49',
'FCSOpcode': 'fCoEGPS.header.FCS.opcode-50',
'FCSMaxsize': 'fCoEGPS.header.FCS.maxsize-51',
'FCSReserved': 'fCoEGPS.header.FCS.reserved-52',
'FCSPortName': 'fCoEGPS.header.FCS.portName-53',
'FcCRCAutoCRC': 'fCoEGPS.header.fcCRC.autoCRC-54',
'FcCRCGenerateBadCRC': 'fCoEGPS.header.fcCRC.generateBadCRC-55',
'FcTrailerEEOF': 'fCoEGPS.header.fcTrailer.eEOF-56',
'FcTrailerReserved': 'fCoEGPS.header.fcTrailer.reserved-57',
}
def __init__(self, parent, list_op=False):
super(FCoEGPS, self).__init__(parent, list_op)
@property
def FcoeHeaderVersion(self):
"""
Display Name: Version
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderVersion']))
@property
def FcoeHeaderReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderReserved']))
@property
def FcoeHeaderESOF(self):
"""
Display Name: E-SOF
Default Value: 54
Value Format: decimal
Available enum values: SOFf - Fabric, 40, SOFi4 - Initiate Class 4, 41, SOFi2 - Initiate Class 2, 45, SOFi3 - Initiate Class 3, 46, SOFn4 - Normal Class 4, 49, SOFn2 - Normal Class 2, 53, SOFn3 - Normal Class 3, 54, SOFc4 - Connect Class 4, 57, SOFn1 - Normal Class 1 or 6, 250
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderESOF']))
@property
def DeviceDataFramesDeviceDataInfo(self):
"""
Display Name: Information
Default Value: 0
Value Format: decimal
Available enum values: Uncategorized Information, 0, Solicited Data, 1, Unsolicited Control, 2, Solicited Control, 3, Unsolicited Data, 4, Data Descriptor, 5, Unsolicited Command, 6, Command Status, 7
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DeviceDataFramesDeviceDataInfo']))
@property
def RCTLReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RCTLReserved']))
@property
def ExtendedLinkServicesInfo(self):
"""
Display Name: Information
Default Value: 33
Value Format: decimal
Available enum values: Solicited Data, 32, Request, 33, Reply, 34
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedLinkServicesInfo']))
@property
def Fc4LinkDataInfo(self):
"""
Display Name: Information
Default Value: 48
Value Format: decimal
Available enum values: Uncategorized Information, 48, Solicited Data, 49, Unsolicited Control, 50, Solicited Control, 51, Unsolicited Data, 52, Data Descriptor, 53, Unsolicited Command, 54, Command Status, 55
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Fc4LinkDataInfo']))
@property
def VideoDataInfo(self):
"""
Display Name: Information
Default Value: 68
Value Format: decimal
Available enum values: Unsolicited Data, 68
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VideoDataInfo']))
@property
def ExtendedHeaderInfo(self):
"""
Display Name: Information
Default Value: 80
Value Format: decimal
Available enum values: Virtual Fabric Tagging Header, 80, Inter Fabric Routing Header, 81, Encapsulation Header, 82
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedHeaderInfo']))
@property
def BasicLinkServicesInfo(self):
"""
Display Name: Information
Default Value: 128
Value Format: decimal
Available enum values: No Operation, 128, Abort Sequence, 129, Remove Connection, 130, Basic Accept, 132, Basic Reject, 133, Dedicated Connection Preempted, 134
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BasicLinkServicesInfo']))
@property
def LinkControlFramesInfo(self):
"""
Display Name: Information
Default Value: 192
Value Format: decimal
Available enum values: Acknowledge_1, 128, Acknowledge_0, 129, Nx Port Reject, 130, Fabric Reject, 131, Nx Port Busy, 132, Fabric Busy to Data Frame, 133, Fabric Busy to Link Control Frame, 134, Link Credit Reset, 135, Notify, 136, End, 137
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkControlFramesInfo']))
@property
def ExtendedRoutingInfo(self):
"""
Display Name: Information
Default Value: 240
Value Format: decimal
Available enum values: Vendor Unique, 240
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedRoutingInfo']))
@property
def FcHeaderDstId(self):
"""
Display Name: Destination ID
Default Value: 0
Value Format: fCID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDstId']))
@property
def FcHeaderCsCTLPriority(self):
"""
Display Name: CS_CTL/Priority
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderCsCTLPriority']))
@property
def FcHeaderSrcId(self):
"""
Display Name: Source ID
Default Value: 0
Value Format: fCID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSrcId']))
@property
def FcHeaderType(self):
"""
Display Name: Type
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderType']))
@property
def FCTLCustom(self):
"""
Display Name: Custom
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCTLCustom']))
@property
def BuildFCTLExchangeContext(self):
"""
Display Name: Exchange Context
Default Value: 0
Value Format: decimal
Available enum values: Originator, 0, Receipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeContext']))
@property
def BuildFCTLSequenceContext(self):
"""
Display Name: Sequence Context
Default Value: 0
Value Format: decimal
Available enum values: Initiator, 0, Receipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceContext']))
@property
def BuildFCTLFirstSequence(self):
"""
Display Name: First Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, First, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFirstSequence']))
@property
def BuildFCTLLastSequence(self):
"""
Display Name: Last Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLLastSequence']))
@property
def BuildFCTLEndSequence(self):
"""
Display Name: End Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndSequence']))
@property
def BuildFCTLEndConnection(self):
"""
Display Name: End Connection
Default Value: 0
Value Format: decimal
Available enum values: Alive, 0, Pending, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndConnection']))
@property
def BuildFCTLCsCTLPriority(self):
"""
Display Name: CS_CTL/Priority
Default Value: 0
Value Format: decimal
Available enum values: CS_CTL, 0, Priority, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLCsCTLPriority']))
@property
def BuildFCTLSequenceInitiative(self):
"""
Display Name: Sequence Initiative
Default Value: 0
Value Format: decimal
Available enum values: Hold, 0, Transfer, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceInitiative']))
@property
def BuildFCTLFcXIDReassigned(self):
"""
Display Name: FC XID Reassigned
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcXIDReassigned']))
@property
def BuildFCTLFcInvalidateXID(self):
"""
Display Name: FC Invalidate XID
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcInvalidateXID']))
@property
def BuildFCTLAckForm(self):
"""
Display Name: ACK_Form
Default Value: 0
Value Format: decimal
Available enum values: No assistance provided, 0, ACK_1 Required, 1, reserved, 2, Ack_0 Required, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAckForm']))
@property
def BuildFCTLFcDataCompression(self):
"""
Display Name: FC Data Compression
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataCompression']))
@property
def BuildFCTLFcDataEncryption(self):
"""
Display Name: FC Data Encryption
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataEncryption']))
@property
def BuildFCTLRetransmittedSequence(self):
"""
Display Name: Retransmitted Sequence
Default Value: 0
Value Format: decimal
Available enum values: Original, 0, Retransmission, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRetransmittedSequence']))
@property
def BuildFCTLUnidirectionalTransmit(self):
"""
Display Name: Unidirectional Transmit
Default Value: 0
Value Format: decimal
Available enum values: Bi-directional, 0, Unidirectional, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLUnidirectionalTransmit']))
@property
def BuildFCTLContinueSeqCondition(self):
"""
Display Name: Continue Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: No information, 0, Sequence to follow-immediately, 1, Squence to follow-soon, 2, Sequence to follow-delayed, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLContinueSeqCondition']))
@property
def BuildFCTLAbortSeqCondition(self):
"""
Display Name: Abort Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: 0x00, 0, 0x01, 1, 0x10, 2, 0x11, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAbortSeqCondition']))
@property
def BuildFCTLRelativeOffsetPresent(self):
"""
Display Name: Relative Offset Present
Default Value: 0
Value Format: decimal
Available enum values: Parameter field defined, 0, Relative offset, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRelativeOffsetPresent']))
@property
def BuildFCTLExchangeReassembly(self):
"""
Display Name: Exchange Reassembly
Default Value: 0
Value Format: decimal
Available enum values: off, 0, on, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeReassembly']))
@property
def BuildFCTLFillBytes(self):
"""
Display Name: Fill Bytes
Default Value: 0
Value Format: decimal
Available enum values: 0 bytes of fill, 0, 1 bytes of fill, 1, 2 bytes of fill, 2, 3 bytes of fill, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFillBytes']))
@property
def FcHeaderSeqID(self):
"""
Display Name: SEQ_ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqID']))
@property
def FcHeaderDfCTL(self):
"""
Display Name: DF_CTL
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDfCTL']))
@property
def FcHeaderSeqCNT(self):
"""
Display Name: SEQ_CNT
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqCNT']))
@property
def FcHeaderOxID(self):
"""
Display Name: OX_ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderOxID']))
@property
def FcHeaderRxID(self):
"""
Display Name: RX_ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderRxID']))
@property
def FcHeaderParameter(self):
"""
Display Name: Parameter
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderParameter']))
@property
def FcCTRevision(self):
"""
Display Name: Revision
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTRevision']))
@property
def FcCTInId(self):
"""
Display Name: IN_ID
Default Value: 0x000000
Value Format: fCID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTInId']))
@property
def FcCTGsType(self):
"""
Display Name: GS_Type
Default Value: 250
Value Format: decimal
Available enum values: Event Service, 244, Key Distribution Service, 247, Alias Service, 248, Management Service, 250, Time Service, 251, Directory Service, 252
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsType']))
@property
def FcCTGsSubtype(self):
"""
Display Name: GS_Subtype
Default Value: 0x01
Value Format: hex
Available enum values: Fabric Configuration Server, 1, Unzoned Name Server, 2, Fabric Zone Server, 3, Lock Server, 4, Performance Server, 5, Security Policy Server, 6, Security Information Server, 7
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsSubtype']))
@property
def FcCTOptions(self):
"""
Display Name: Options
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTOptions']))
@property
def FcCTReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTReserved']))
@property
def FCSOpcode(self):
"""
Display Name: Command/Response Code
Default Value: 294
Value Format: decimal
Available enum values: GTIN, 256, GIEL, 257, GIET, 273, GDID, 274, GMID, 275, GFN, 276, GIELN, 277, GMAL, 278, GIEIL, 279, GPL, 280, GPT, 289, GPPN, 290, GAPNL, 292, GPS, 294, GPSC, 295, GSES, 304, GIEAG, 320, GPAG, 321, GPLNL, 401, GPLT, 402, GPLML, 403, GPAB, 407, GNPL, 417, GPNL, 418, GPFCP, 420, GPLI, 421, GNID, 433, RIELN, 533, RPL, 640, RPLN, 657, RPLT, 658, RPLM, 659, RPAB, 664, RPFCP, 666, RPLI, 667, DPL, 896, DPLN, 913, DPLM, 914, DPLML, 915, DPLI, 916, DPAB, 917, DPALL, 927, FTR, 1024, FPNG, 1025
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSOpcode']))
@property
def FCSMaxsize(self):
"""
Display Name: Maximum/Residual Size
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSMaxsize']))
@property
def FCSReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSReserved']))
@property
def FCSPortName(self):
"""
Display Name: Port Name
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSPortName']))
@property
def FcCRCAutoCRC(self):
"""
Display Name: Auto
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCAutoCRC']))
@property
def FcCRCGenerateBadCRC(self):
"""
Display Name: Bad CRC
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCGenerateBadCRC']))
@property
def FcTrailerEEOF(self):
"""
Display Name: E-EOF
Default Value: 65
Value Format: decimal
Available enum values: EOFn - Normal, 65, EOFt - Terminate, 66, EOFrt - Remove Terminate, 68, EOFni - Normal Invalid, 73, EOFrti - Remove Terminate Invalid, 79, EOFa - Abort, 80
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerEEOF']))
@property
def FcTrailerReserved(self):
"""
Display Name: Reserved
Default Value: 0x000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerReserved']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 39.341716 | 519 | 0.667381 |
fb8f4f4332892f85e068f46e07aa5d606bd009f3 | 9,504 | py | Python | bpnet/models.py | AlanNawzadAmin/bpnet | 4c42ad189a624fa82ef97d48117a92a7fb70e830 | [
"MIT"
] | null | null | null | bpnet/models.py | AlanNawzadAmin/bpnet | 4c42ad189a624fa82ef97d48117a92a7fb70e830 | [
"MIT"
] | null | null | null | bpnet/models.py | AlanNawzadAmin/bpnet | 4c42ad189a624fa82ef97d48117a92a7fb70e830 | [
"MIT"
] | null | null | null | import numpy as np
import keras.layers as kl
from keras.optimizers import Adam
from keras.models import Model
from concise.utils.helper import get_from_module
import bpnet
import bpnet.losses as blosses
import gin
import keras
# TODO - setup the following model as a simple bpnet (?)
@gin.configurable
def bpnet_model(tasks,
filters,
n_dil_layers,
conv1_kernel_size,
tconv_kernel_size,
b_loss_weight=1,
c_loss_weight=1,
p_loss_weight=1,
poisson_loss=True,
c_splines=0,
b_splines=20,
merge_profile_reg=False,
lr=0.004,
tracks_per_task=2,
padding='same',
batchnorm=False,
use_bias=False,
n_bias_tracks=2,
profile_metric=None,
count_metric=None,
profile_bias_window_sizes=[1, 50],
seqlen=None,
skip_type='residual'):
"""Setup the BPNet model architecture
Args:
tasks: list of tasks
filters: number of convolutional filters to use at each layer
n_dil_layers: number of dilated convolutional filters to use
conv1_kernel_size: kernel_size of the first convolutional layer
tconv_kernel_size: kernel_size of the transpose/de-convolutional final layer
b_loss_weight: binary classification weight
c_loss_weight: total count regression weight
p_loss_weight: profile regression weight
poisson_loss: use poisson loss for counts
      c_splines: number of splines to use in the total count regression output head (0=None)
      b_splines: number of splines to use in the binary classification output head
merge_profile_reg: if True, total count and profile prediction will be part of
a single profile output head
lr: learning rate of the Adam optimizer
padding: padding in the convolutional layers
batchnorm: if True, add Batchnorm after every layer. Note: this may mess up the
DeepLIFT contribution scores downstream
use_bias: if True, correct for the bias
n_bias_tracks: how many bias tracks to expect (for both total count and profile regression)
seqlen: sequence length.
skip_type: skip connection type ('residual' or 'dense')
Returns:
bpnet.seqmodel.SeqModel
"""
from bpnet.seqmodel import SeqModel
from bpnet.layers import DilatedConv1D, DeConv1D, GlobalAvgPoolFCN, MovingAverages
from bpnet.metrics import BPNetMetricSingleProfile, default_peak_pred_metric
from bpnet.heads import ScalarHead, ProfileHead
from bpnet.metrics import ClassificationMetrics, RegressionMetrics
from bpnet.losses import multinomial_nll, CountsMultinomialNLL, PoissonMultinomialNLL
import bpnet.losses as bloss
from bpnet.activations import clipped_exp
from bpnet.functions import softmax
assert p_loss_weight >= 0
assert c_loss_weight >= 0
assert b_loss_weight >= 0
# import ipdb
# ipdb.set_trace()
# TODO is it possible to re-instantiate the class to get rid of gin train?
if profile_metric is None:
print("Using the default profile prediction metric")
profile_metric = default_peak_pred_metric
if count_metric is None:
print("Using the default regression prediction metrics")
count_metric = RegressionMetrics()
# Heads -------------------------------------------------
heads = []
# Profile prediction
if p_loss_weight > 0:
if not merge_profile_reg:
heads.append(ProfileHead(target_name='{task}/profile',
net=DeConv1D(n_tasks=tracks_per_task,
filters=filters,
tconv_kernel_size=tconv_kernel_size,
padding=padding,
n_hidden=0,
batchnorm=batchnorm
),
loss=multinomial_nll,
loss_weight=p_loss_weight,
postproc_fn=softmax,
use_bias=use_bias,
bias_input='bias/{task}/profile',
bias_shape=(None, n_bias_tracks),
bias_net=MovingAverages(window_sizes=profile_bias_window_sizes),
metric=profile_metric
))
else:
if poisson_loss:
merge_loss = PoissonMultinomialNLL(c_task_weight=c_loss_weight)
else:
merge_loss = CountsMultinomialNLL(c_task_weight=c_loss_weight)
heads.append(ProfileHead(target_name='{task}/profile',
net=DeConv1D(n_tasks=tracks_per_task,
filters=filters,
tconv_kernel_size=tconv_kernel_size,
padding=padding,
n_hidden=1, # use 1 hidden layer in that case
batchnorm=batchnorm
),
activation=clipped_exp,
loss=merge_loss,
loss_weight=p_loss_weight,
bias_input='bias/{task}/profile',
use_bias=use_bias,
bias_shape=(None, n_bias_tracks),
bias_net=MovingAverages(window_sizes=profile_bias_window_sizes),
metric=BPNetMetricSingleProfile(count_metric=count_metric,
profile_metric=profile_metric)
))
c_loss_weight = 0 # don't need to use the other count loss
# Count regression
if c_loss_weight > 0:
if not merge_profile_reg:
heads.append(ScalarHead(target_name='{task}/counts',
net=GlobalAvgPoolFCN(n_tasks=tracks_per_task,
n_splines=c_splines,
batchnorm=batchnorm),
activation=None,
loss='mse',
loss_weight=c_loss_weight,
bias_input='bias/{task}/counts',
use_bias=use_bias,
bias_shape=(n_bias_tracks, ),
metric=count_metric,
))
# Binary classification
if b_loss_weight > 0:
heads.append(ScalarHead(target_name='{task}/class',
net=GlobalAvgPoolFCN(n_tasks=1,
n_splines=b_splines,
batchnorm=batchnorm),
activation='sigmoid',
loss='binary_crossentropy',
loss_weight=b_loss_weight,
metric=ClassificationMetrics(),
))
# -------------------------------------------------
m = SeqModel(
body=DilatedConv1D(filters=filters,
conv1_kernel_size=conv1_kernel_size,
n_dil_layers=n_dil_layers,
padding=padding,
batchnorm=batchnorm,
skip_type=skip_type),
heads=heads,
tasks=tasks,
optimizer=Adam(lr=lr),
seqlen=seqlen,
)
return m
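# Usage sketch (illustrative only; the task names and hyper-parameter values are
# assumptions, not recommended settings):
#
#   model = bpnet_model(tasks=['Oct4', 'Sox2'],
#                       filters=64,
#                       n_dil_layers=9,
#                       conv1_kernel_size=25,
#                       tconv_kernel_size=25,
#                       seqlen=1000)
#
# which returns a bpnet.seqmodel.SeqModel carrying, per task, a profile head,
# a total-count head and a binary-classification head under the default loss weights.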
@gin.configurable
def binary_seq_model(tasks,
net_body,
net_head,
lr=0.004,
seqlen=None):
"""NOTE: This doesn't work with gin-train since
the classes injected by gin-config can't be pickled.
Instead, I created `basset_seq_model`
```
Can't pickle <class 'bpnet.layers.BassetConv'>: it's not the same
object as bpnet.layers.BassetConv
```
"""
from bpnet.seqmodel import SeqModel
from bpnet.heads import ScalarHead, ProfileHead
from bpnet.metrics import ClassificationMetrics
# Heads -------------------------------------------------
heads = [ScalarHead(target_name='{task}/class',
net=net_head,
activation='sigmoid',
loss='binary_crossentropy',
metric=ClassificationMetrics(),
)]
# -------------------------------------------------
m = SeqModel(
body=net_body,
heads=heads,
tasks=tasks,
optimizer=Adam(lr=lr),
seqlen=seqlen,
)
return m
def get(name):
return get_from_module(name, globals())
| 42.618834 | 101 | 0.504104 |
acc9d8c25d9106c2b80d48fd98cc69973be9181f | 13,392 | py | Python | openprocurement/tender/core/tests/utils.py | openprocurement/openprocurement.tender.core | 4a22d8eb4d6fe676f9bc01182f83d50b779c3a8d | [
"Apache-2.0"
] | 1 | 2021-11-18T16:34:33.000Z | 2021-11-18T16:34:33.000Z | openprocurement/tender/core/tests/utils.py | openprocurement/openprocurement.tender.core | 4a22d8eb4d6fe676f9bc01182f83d50b779c3a8d | [
"Apache-2.0"
] | 30 | 2017-03-22T12:16:17.000Z | 2018-08-08T04:27:28.000Z | openprocurement/tender/core/tests/utils.py | openprocurement/openprocurement.tender.core | 4a22d8eb4d6fe676f9bc01182f83d50b779c3a8d | [
"Apache-2.0"
] | 13 | 2017-02-22T15:59:17.000Z | 2018-05-11T06:17:28.000Z | # -*- coding: utf-8 -*-
import unittest
from copy import deepcopy
from datetime import datetime, timedelta, time
from mock import patch, MagicMock, call
from schematics.transforms import wholelist
from schematics.types import StringType
from pyramid.exceptions import URLDecodeError
from uuid import uuid4
from openprocurement.tender.core.utils import (
generate_tender_id, tender_serialize, tender_from_data,
register_tender_procurementMethodType, calculate_business_date,
isTender, SubscribersPicker, extract_tender, has_unanswered_complaints,
has_unanswered_questions, remove_draft_bids, save_tender, apply_patch
)
from openprocurement.api.constants import TZ
from openprocurement.tender.core.models import (
Tender as BaseTender, Lot, Complaint, Item, Question, Bid
)
class Tender(BaseTender):
class Options:
roles = {
'draft': wholelist(),
'plain': wholelist()
}
procurementMethodType = StringType(
choices=['esco.EU', 'bellowThreshold', 'aboveThresholdEU'],
default='bellowThreshold'
)
class TestUtils(unittest.TestCase):
def setUp(self):
self.tender_data = {
'id': 'ae50ea25bb1349898600ab380ee74e57',
'dateModified': '2016-04-18T11:26:10.320970+03:00',
'status': 'draft',
'tenderID': 'UA-2016-04-18-000003'
}
self.lots = [Lot({
'id': '11111111111111111111111111111111',
'title': 'Earth',
'value': {'amount': 500000},
'minimalStep': {'amount': 1000}
}), Lot({
'id': '22222222222222222222222222222222',
'title': 'Mars',
'value': {'amount': 600000},
'minimalStep': {'amount': 2000}
})]
self.items = [Item({
'description': 'Some item',
'relatedLot': '11111111111111111111111111111111'
})]
def test_generate_tender_id(self):
server_id = '7'
ctime = datetime.now(TZ)
db = MagicMock()
def db_get(doc_id, default_value):
return default_value
db.get = db_get
tender_id = generate_tender_id(ctime, db, server_id)
tid = 'UA-{:04}-{:02}-{:02}-{:06}{}'.format(
ctime.year, ctime.month, ctime.day, 1,
server_id and '-' + server_id)
self.assertEqual(tid, tender_id)
def test_tender_serialize(self):
request = MagicMock()
request.tender_from_data.return_value = None
request.context = None
tender_data = {}
fields = []
tender = tender_serialize(request, tender_data, fields)
self.assertEqual(
tender, {'procurementMethodType': '', 'dateModified': '', 'id': ''}
)
request.context = self.tender_data
request.tender_from_data.return_value = Tender(self.tender_data)
fields = ['id', 'dateModified', 'status', 'tenderID']
tender = tender_serialize(request, self.tender_data, fields)
self.assertEqual(tender, self.tender_data)
def test_register_tender_procurementMethodType(self):
config = MagicMock()
config.registry.tender_procurementMethodTypes = {}
self.assertEqual(config.registry.tender_procurementMethodTypes, {})
register_tender_procurementMethodType(config, Tender)
bellow_threshold = config.registry.tender_procurementMethodTypes.get(
'bellowThreshold'
)
self.assertEqual(bellow_threshold, Tender)
def test_calculate_business_date(self):
date_obj = datetime(2017,10,7)
delta_obj = timedelta(days=7)
# Test with accelerator = 1440
context = {
"procurementMethodDetails": "quick, accelerator=1440",
"procurementMethodType": "negotiation"
}
business_date = calculate_business_date(
date_obj, delta_obj, context=context, working_days=True)
self.assertEqual(business_date, datetime(2017, 10, 7, 0, 7))
# Test without context and working_days
business_date = calculate_business_date(date_obj, delta_obj)
self.assertEqual(business_date, datetime(2017, 10, 14))
# Test with working_days and timedelta_obj > timedelta()
business_date = calculate_business_date(
date_obj, delta_obj, working_days=True)
self.assertEqual(business_date, datetime(2017, 10, 19))
# Test with working days and timedelta_obj < timedelta()
business_date = calculate_business_date(
date_obj, timedelta(0), working_days=True
)
self.assertEqual(business_date, datetime(2017, 10, 7))
# Another test with working days and timedelta > timedelta()
date_obj = datetime(2017, 10, 15)
delta_obj = timedelta(1)
business_date = calculate_business_date(
date_obj, delta_obj, working_days=True
)
self.assertEqual(business_date, datetime(2017, 10, 18))
@patch('openprocurement.tender.core.utils.error_handler')
def test_tender_from_data(self, mocked_handler):
mocked_handler.return_value = Exception('Mocked!')
request = MagicMock()
request.registry.tender_procurementMethodTypes.get.side_effect = [
None, None, Tender, Tender
]
with self.assertRaises(Exception) as e:
tender_from_data(request, self.tender_data)
self.assertEqual(e.exception.message, 'Mocked!')
self.assertEqual(request.errors.status, 415)
request.errors.add.assert_called_once_with(
'data', 'procurementMethodType', 'Not implemented'
)
model = tender_from_data(request, self.tender_data, raise_error=False)
self.assertIs(model, None)
model = tender_from_data(request, self.tender_data, create=False)
self.assertIs(model, Tender)
model = tender_from_data(request, self.tender_data)
self.assertIsInstance(model, Tender)
@patch('openprocurement.tender.core.utils.decode_path_info')
@patch('openprocurement.tender.core.utils.error_handler')
def test_extract_tender(self, mocked_error_handler, mocked_decode_path):
mocked_error_handler.return_value = Exception('Oops.')
mocked_decode_path.side_effect = [
KeyError('Missing \'PATH_INFO\''),
UnicodeDecodeError('UTF-8', 'obj', 1, 10, 'Hm...'),
'/', '/api/2.3/tenders/{}'.format(self.tender_data['id'])]
tender_data = deepcopy(self.tender_data)
tender_data['doc_type'] = 'Tender'
request = MagicMock()
request.environ = {'PATH_INFO': '/'}
request.registry.tender_procurementMethodTypes.get.return_value = \
Tender
request.tender_from_data.return_value = \
tender_from_data(request, tender_data)
request.registry.db = MagicMock()
# Test with KeyError
self.assertIs(extract_tender(request), None)
# Test with UnicodeDecodeError
with self.assertRaises(URLDecodeError) as e:
extract_tender(request)
self.assertEqual(e.exception.encoding, 'UTF-8')
self.assertEqual(e.exception.object, 'obj')
self.assertEqual(e.exception.start, 1)
self.assertEqual(e.exception.end, 10)
self.assertEqual(e.exception.reason, 'Hm...')
self.assertIsInstance(e.exception, URLDecodeError)
# Test with path '/'
self.assertIs(extract_tender(request), None)
mocked_decode_path.side_effect = ['/api/2.3/tenders/{}'.format(
self.tender_data['id'])] * 3
# Test with extract_tender_adapter raise HTTP 410
request.registry.db.get.return_value = {'doc_type': 'tender'}
with self.assertRaises(Exception) as e:
extract_tender(request)
self.assertEqual(request.errors.status, 410)
request.errors.add.assert_called_once_with(
'url', 'tender_id', 'Archived')
# Test with extract_tender_adapter raise HTTP 404
request.registry.db.get.return_value = {'doc_type': 'notTender'}
with self.assertRaises(Exception) as e:
extract_tender(request)
self.assertEqual(request.errors.status, 404)
request.errors.add.assert_has_calls([
call('url', 'tender_id', 'Not Found')])
# Test with extract_tender_adapter return Tender object
request.registry.db.get.return_value = tender_data
tender = extract_tender(request)
serialized_tender = tender.serialize('draft')
self.assertIsInstance(tender, Tender)
for k in tender_data:
self.assertEqual(tender_data[k], serialized_tender[k])
def test_has_unanswered_complaints(self):
tender = Tender(self.tender_data)
tender.block_tender_complaint_status = ['pending']
tender.lots = self.lots
tender.complaints = [Complaint({
'status': 'pending',
'relatedLot': '11111111111111111111111111111111',
'title': 'Earth is mine!'
})]
self.assertEqual(True, has_unanswered_complaints(tender))
tender.complaints[0].relatedLot = '33333333333333333333333333333333'
self.assertEqual(False, has_unanswered_complaints(tender))
self.assertEqual(True, has_unanswered_complaints(tender, False))
tender.complaints[0].status = 'resolved'
self.assertEqual(False, has_unanswered_complaints(tender, False))
def test_has_unanswered_questions(self):
tender = Tender(self.tender_data)
tender.lots = self.lots
tender.items = self.items
tender.questions = [Question({
'questionOf': 'lot',
'relatedItem': '11111111111111111111111111111111',
'title': 'Do you have some Earth?'
})]
self.assertEqual(True, has_unanswered_questions(tender))
self.assertEqual(True, has_unanswered_questions(tender, False))
tender.questions[0].answer = 'No'
self.assertEqual(False, has_unanswered_questions(tender))
self.assertEqual(False, has_unanswered_questions(tender, False))
def test_remove_draft_bids(self):
tender = Tender(self.tender_data)
tender.bids = [Bid(), Bid({'status': 'draft'})]
request = MagicMock()
request.validated = {'tender': tender}
self.assertEqual(len(tender.bids), 2)
remove_draft_bids(request)
self.assertEqual(len(tender.bids), 1)
self.assertEqual(tender.bids[0].status, 'active')
def test_save_tender_without_date_obj(self):
tender_src = {
'status': 'active.tendering',
'title': 'Non secret purchase',
'date': datetime.now(TZ).isoformat()
}
validated_tender_data = deepcopy(self.tender_data)
validated_tender_data['title'] = 'Top Secret Purchase'
validated_tender_data['_rev'] = '1-{}'.format(uuid4().hex)
validated_tender_data['mode'] = 'test'
validated_tender = Tender(validated_tender_data)
request = MagicMock()
request.registry.db.save.return_value = (validated_tender.id,
validated_tender_data['_rev'])
request.authenticated_userid = 'administrator'
request.validated = {'tender_src': tender_src,
'tender': validated_tender}
res = save_tender(request)
self.assertEqual(res, True)
@patch('openprocurement.tender.core.utils.save_tender')
def test_apply_patch(self, mocked_save):
request = MagicMock()
request.validated = {'data': {'status': 'active.tendering'}}
request.context = Tender(self.tender_data)
apply_patch(request)
mocked_save.assert_called_once_with(request)
class TestIsTender(TestUtils):
def test_is_tender(self):
tender = Tender(self.tender_data)
is_tender = isTender('bellowThreshold', None)
self.assertEqual(is_tender.phash(),
'procurementMethodType = bellowThreshold')
request = MagicMock()
request.tender = None
self.assertEqual(False, is_tender(None, request))
request.tender = tender
self.assertEqual(True, is_tender(None, request))
is_tender = isTender('esco.EU', None)
self.assertEqual(is_tender.phash(), 'procurementMethodType = esco.EU')
self.assertEqual(False, is_tender(None, request))
self.assertEqual(tender.procurementMethodType, 'bellowThreshold')
    def test_subscribers_picker(self):
picker = SubscribersPicker('bellowThreshold', None)
tender = Tender(self.tender_data)
event = MagicMock()
event.tender = None
self.assertEqual(picker.phash(),
'procurementMethodType = bellowThreshold')
self.assertEqual(False, picker(event))
event.tender = tender
self.assertEqual(True, picker(event))
picker = SubscribersPicker('esco.EU', None)
self.assertEqual(picker.phash(), 'procurementMethodType = esco.EU')
self.assertEqual(False, picker(event))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestUtils))
suite.addTest(unittest.makeSuite(TestIsTender))
return suite
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
# ==== sky/tools/webkitpy/layout_tests/port/base.py (repo: gitFreeByte/sky_engine @ 05c90489, license: BSD-3-Clause) ====
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Abstract base class of Port-specific entry points for the layout tests
test infrastructure (the Port and Driver classes)."""
import cgi
import difflib
import errno
import itertools
import logging
import math
import operator
import optparse
import os
import re
import subprocess
import sys
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
from webkitpy.common import find_files
from webkitpy.common import read_checksum_from_png
from webkitpy.common.memoized import memoized
from webkitpy.common.system import path
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.path import cygpath
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port import config as port_config
from webkitpy.layout_tests.port import driver
from webkitpy.layout_tests.port import server_process
from webkitpy.layout_tests.port.factory import PortFactory
from webkitpy.layout_tests.servers import apache_http
from webkitpy.layout_tests.servers import pywebsocket
from skypy.skyserver import SkyServer
_log = logging.getLogger(__name__)
# FIXME: This class should merge with WebKitPort now that Chromium behaves mostly like other webkit ports.
class Port(object):
"""Abstract class for Port-specific hooks for the layout_test package."""
# Subclasses override this. This should indicate the basic implementation
# part of the port name, e.g., 'mac', 'win', 'gtk'; there is probably (?)
# one unique value per class.
# FIXME: We should probably rename this to something like 'implementation_name'.
port_name = None
# Test names resemble unix relative paths, and use '/' as a directory separator.
TEST_PATH_SEPARATOR = '/'
ALL_BUILD_TYPES = ('debug', 'release')
SKY_SHELL_NAME = 'sky_shell'
    # True if the port has aac and mp3 codecs built in.
PORT_HAS_AUDIO_CODECS_BUILT_IN = False
ALL_SYSTEMS = (
('snowleopard', 'x86'),
('lion', 'x86'),
# FIXME: We treat Retina (High-DPI) devices as if they are running
# a different operating system version. This isn't accurate, but will work until
# we need to test and support baselines across multiple O/S versions.
('retina', 'x86'),
('mountainlion', 'x86'),
('mavericks', 'x86'),
('xp', 'x86'),
('win7', 'x86'),
('lucid', 'x86'),
('lucid', 'x86_64'),
# FIXME: Technically this should be 'arm', but adding a third architecture type breaks TestConfigurationConverter.
# If we need this to be 'arm' in the future, then we first have to fix TestConfigurationConverter.
('icecreamsandwich', 'x86'),
)
ALL_BASELINE_VARIANTS = [
'mac-mavericks', 'mac-mountainlion', 'mac-retina', 'mac-lion', 'mac-snowleopard',
'win-win7', 'win-xp',
'linux-x86_64', 'linux-x86',
]
CONFIGURATION_SPECIFIER_MACROS = {
'mac': ['snowleopard', 'lion', 'retina', 'mountainlion', 'mavericks'],
'win': ['xp', 'win7'],
'linux': ['lucid'],
'android': ['icecreamsandwich'],
}
DEFAULT_BUILD_DIRECTORIES = ('out',)
# overridden in subclasses.
FALLBACK_PATHS = {}
SUPPORTED_VERSIONS = []
# URL to the build requirements page.
BUILD_REQUIREMENTS_URL = ''
@classmethod
def latest_platform_fallback_path(cls):
return cls.FALLBACK_PATHS[cls.SUPPORTED_VERSIONS[-1]]
@classmethod
def _static_build_path(cls, filesystem, build_directory, chromium_base, configuration, comps):
if build_directory:
return filesystem.join(build_directory, configuration, *comps)
hits = []
for directory in cls.DEFAULT_BUILD_DIRECTORIES:
base_dir = filesystem.join(chromium_base, directory, configuration)
path = filesystem.join(base_dir, *comps)
if filesystem.exists(path):
hits.append((filesystem.mtime(path), path))
if hits:
hits.sort(reverse=True)
return hits[0][1] # Return the newest file found.
# We have to default to something, so pick the last one.
return filesystem.join(base_dir, *comps)
@classmethod
def determine_full_port_name(cls, host, options, port_name):
"""Return a fully-specified port name that can be used to construct objects."""
# Subclasses will usually override this.
assert port_name.startswith(cls.port_name)
return port_name
def __init__(self, host, port_name, options=None, **kwargs):
# This value may be different from cls.port_name by having version modifiers
# and other fields appended to it (for example, 'qt-arm' or 'mac-wk2').
self._name = port_name
# These are default values that should be overridden in a subclasses.
self._version = ''
self._architecture = 'x86'
# FIXME: Ideally we'd have a package-wide way to get a
# well-formed options object that had all of the necessary
# options defined on it.
self._options = options or optparse.Values()
self.host = host
self._executive = host.executive
self._filesystem = host.filesystem
self._webkit_finder = WebKitFinder(host.filesystem)
self._config = port_config.Config(self._executive, self._filesystem, self.port_name)
self._helper = None
self._sky_server = None
self._websocket_server = None
self._image_differ = None
self._server_process_constructor = server_process.ServerProcess # overridable for testing
self._http_lock = None # FIXME: Why does this live on the port object?
self._dump_reader = None
# Python's Popen has a bug that causes any pipes opened to a
# process that can't be executed to be leaked. Since this
# code is specifically designed to tolerate exec failures
# to gracefully handle cases where wdiff is not installed,
# the bug results in a massive file descriptor leak. As a
# workaround, if an exec failure is ever experienced for
# wdiff, assume it's not available. This will leak one
# file descriptor but that's better than leaking each time
# wdiff would be run.
#
# http://mail.python.org/pipermail/python-list/
# 2008-August/505753.html
# http://bugs.python.org/issue3210
self._wdiff_available = None
# FIXME: prettypatch.py knows this path, why is it copied here?
self._pretty_patch_path = self.path_from_webkit_base("tools", "third_party", "PrettyPatch", "prettify.rb")
self._pretty_patch_available = None
if not hasattr(options, 'configuration') or not options.configuration:
self.set_option_default('configuration', self.default_configuration())
self._test_configuration = None
self._reftest_list = {}
self._results_directory = None
def buildbot_archives_baselines(self):
return True
def supports_per_test_timeout(self):
return False
def default_pixel_tests(self):
return False
def default_smoke_test_only(self):
return False
def default_timeout_ms(self):
timeout_ms = 4000
return timeout_ms
def driver_stop_timeout(self):
""" Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
# We want to wait for at least 3 seconds, but if we are really slow, we want to be slow on cleanup as
# well (for things like ASAN, Valgrind, etc.)
return 3.0 * float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
def wdiff_available(self):
if self._wdiff_available is None:
self._wdiff_available = self.check_wdiff(logging=False)
return self._wdiff_available
def pretty_patch_available(self):
if self._pretty_patch_available is None:
self._pretty_patch_available = self.check_pretty_patch(logging=False)
return self._pretty_patch_available
def default_child_processes(self):
"""Return the number of drivers to use for this port."""
return self._executive.cpu_count()
def default_max_locked_shards(self):
"""Return the number of "locked" shards to run in parallel (like the http tests)."""
return self.default_child_processes()
def baseline_path(self):
"""Return the absolute path to the directory to store new baselines in for this port."""
# FIXME: remove once all callers are calling either baseline_version_dir() or baseline_platform_dir()
return self.baseline_version_dir()
def baseline_platform_dir(self):
"""Return the absolute path to the default (version-independent) platform-specific results."""
return self._filesystem.join(self.layout_tests_dir(), 'platform', self.port_name)
def baseline_version_dir(self):
"""Return the absolute path to the platform-and-version-specific results."""
baseline_search_paths = self.baseline_search_path()
return baseline_search_paths[0]
def virtual_baseline_search_path(self, test_name):
suite = self.lookup_virtual_suite(test_name)
if not suite:
return None
return [self._filesystem.join(path, suite.name) for path in self.default_baseline_search_path()]
def baseline_search_path(self):
return self.get_option('additional_platform_directory', []) + self._compare_baseline() + self.default_baseline_search_path()
def default_baseline_search_path(self):
"""Return a list of absolute paths to directories to search under for
baselines. The directories are searched in order."""
return map(self._webkit_baseline_path, self.FALLBACK_PATHS[self.version()])
@memoized
def _compare_baseline(self):
factory = PortFactory(self.host)
target_port = self.get_option('compare_port')
if target_port:
return factory.get(target_port).default_baseline_search_path()
return []
def _check_file_exists(self, path_to_file, file_description,
override_step=None, logging=True):
"""Verify the file is present where expected or log an error.
Args:
            file_description: The (human friendly) name or description of the file
                you're looking for (e.g., "HTTP Server"). Used for error logging.
            override_step: An optional string to be logged if the check fails.
            logging: Whether or not to log the error messages."""
if not self._filesystem.exists(path_to_file):
if logging:
_log.error('Unable to find %s' % file_description)
_log.error(' at %s' % path_to_file)
if override_step:
_log.error(' %s' % override_step)
_log.error('')
return False
return True
def check_build(self, needs_http, printer):
result = True
dump_render_tree_binary_path = self._path_to_driver()
result = self._check_file_exists(dump_render_tree_binary_path,
'test driver') and result
if not result and self.get_option('build'):
result = self._check_driver_build_up_to_date(
self.get_option('configuration'))
else:
_log.error('')
if self.get_option('pixel_tests'):
result = self.check_image_diff(
'To override, invoke with --no-pixel-tests') and result
# It's okay if pretty patch and wdiff aren't available, but we will at least log messages.
self._pretty_patch_available = self.check_pretty_patch()
self._wdiff_available = self.check_wdiff()
if self._dump_reader:
result = self._dump_reader.check_is_functional() and result
if needs_http:
result = self.check_httpd() and result
return test_run_results.OK_EXIT_STATUS if result else test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
def _check_driver(self):
driver_path = self._path_to_driver()
if not self._filesystem.exists(driver_path):
_log.error("%s was not found at %s" % (self.driver_name(), driver_path))
return False
return True
def _check_port_build(self):
# Ports can override this method to do additional checks.
return True
def check_sys_deps(self, needs_http):
return test_run_results.OK_EXIT_STATUS
def check_image_diff(self, override_step=None, logging=True):
"""This routine is used to check whether image_diff binary exists."""
image_diff_path = self._path_to_image_diff()
if not self._filesystem.exists(image_diff_path):
_log.error("image_diff was not found at %s" % image_diff_path)
return False
return True
def check_pretty_patch(self, logging=True):
"""Checks whether we can use the PrettyPatch ruby script."""
try:
_ = self._executive.run_command(['ruby', '--version'])
except OSError, e:
if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
if logging:
_log.warning("Ruby is not installed; can't generate pretty patches.")
_log.warning('')
return False
if not self._filesystem.exists(self._pretty_patch_path):
if logging:
_log.warning("Unable to find %s; can't generate pretty patches." % self._pretty_patch_path)
_log.warning('')
return False
return True
def check_wdiff(self, logging=True):
if not self._path_to_wdiff():
# Don't need to log here since this is the port choosing not to use wdiff.
return False
try:
_ = self._executive.run_command([self._path_to_wdiff(), '--help'])
except OSError:
if logging:
message = self._wdiff_missing_message()
if message:
for line in message.splitlines():
_log.warning(' ' + line)
_log.warning('')
return False
return True
def _wdiff_missing_message(self):
return 'wdiff is not installed; please install it to generate word-by-word diffs.'
def check_httpd(self):
httpd_path = self.path_to_apache()
try:
server_name = self._filesystem.basename(httpd_path)
env = self.setup_environ_for_server(server_name)
if self._executive.run_command([httpd_path, "-v"], env=env, return_exit_code=True) != 0:
_log.error("httpd seems broken. Cannot run http tests.")
return False
return True
except OSError:
_log.error("No httpd found. Cannot run http tests.")
return False
def do_text_results_differ(self, expected_text, actual_text):
return expected_text != actual_text
def do_audio_results_differ(self, expected_audio, actual_audio):
return expected_audio != actual_audio
def diff_image(self, expected_contents, actual_contents):
"""Compare two images and return a tuple of an image diff, and an error string.
        If an error occurs (e.g., image_diff isn't found, or crashes), we log an error and return True (for a diff).
"""
# If only one of them exists, return that one.
if not actual_contents and not expected_contents:
return (None, None)
if not actual_contents:
return (expected_contents, None)
if not expected_contents:
return (actual_contents, None)
tempdir = self._filesystem.mkdtemp()
expected_filename = self._filesystem.join(str(tempdir), "expected.png")
self._filesystem.write_binary_file(expected_filename, expected_contents)
actual_filename = self._filesystem.join(str(tempdir), "actual.png")
self._filesystem.write_binary_file(actual_filename, actual_contents)
diff_filename = self._filesystem.join(str(tempdir), "diff.png")
# image_diff needs native win paths as arguments, so we need to convert them if running under cygwin.
native_expected_filename = self._convert_path(expected_filename)
native_actual_filename = self._convert_path(actual_filename)
native_diff_filename = self._convert_path(diff_filename)
executable = self._path_to_image_diff()
# Note that although we are handed 'old', 'new', image_diff wants 'new', 'old'.
        command = [executable, '--diff', native_actual_filename, native_expected_filename, native_diff_filename]
result = None
err_str = None
try:
            exit_code = self._executive.run_command(command, return_exit_code=True)
if exit_code == 0:
# The images are the same.
result = None
elif exit_code == 1:
result = self._filesystem.read_binary_file(native_diff_filename)
else:
err_str = "Image diff returned an exit code of %s. See http://crbug.com/278596" % exit_code
except OSError, e:
err_str = 'error running image diff: %s' % str(e)
finally:
self._filesystem.rmtree(str(tempdir))
return (result, err_str or None)
def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
"""Returns a string containing the diff of the two text strings
in 'unified diff' format."""
# The filenames show up in the diff output, make sure they're
# raw bytes and not unicode, so that they don't trigger join()
# trying to decode the input.
def to_raw_bytes(string_value):
if isinstance(string_value, unicode):
return string_value.encode('utf-8')
return string_value
expected_filename = to_raw_bytes(expected_filename)
actual_filename = to_raw_bytes(actual_filename)
diff = difflib.unified_diff(expected_text.splitlines(True),
actual_text.splitlines(True),
expected_filename,
actual_filename)
# The diff generated by the difflib is incorrect if one of the files
# does not have a newline at the end of the file and it is present in
# the diff. Relevant Python issue: http://bugs.python.org/issue2142
def diff_fixup(diff):
for line in diff:
yield line
if not line.endswith('\n'):
yield '\n\ No newline at end of file\n'
return ''.join(diff_fixup(diff))
def driver_name(self):
return self.SKY_SHELL_NAME
def expected_baselines_by_extension(self, test_name):
"""Returns a dict mapping baseline suffix to relative path for each baseline in
a test. For reftests, it returns ".==" or ".!=" instead of the suffix."""
# FIXME: The name similarity between this and expected_baselines() below, is unfortunate.
# We should probably rename them both.
baseline_dict = {}
reference_files = self.reference_files(test_name)
if reference_files:
# FIXME: How should this handle more than one type of reftest?
baseline_dict['.' + reference_files[0][0]] = self.relative_test_filename(reference_files[0][1])
for extension in self.baseline_extensions():
path = self.expected_filename(test_name, extension, return_default=False)
baseline_dict[extension] = self.relative_test_filename(path) if path else path
return baseline_dict
def baseline_extensions(self):
"""Returns a tuple of all of the non-reftest baseline extensions we use. The extensions include the leading '.'."""
return ('.wav', '.txt', '.png')
def expected_baselines(self, test_name, suffix, all_baselines=False):
"""Given a test name, finds where the baseline results are located.
Args:
test_name: name of test file (usually a relative path under tests/)
suffix: file suffix of the expected results, including dot; e.g.
'.txt' or '.png'. This should not be None, but may be an empty
string.
all_baselines: If True, return an ordered list of all baseline paths
for the given platform. If False, return only the first one.
Returns
a list of ( platform_dir, results_filename ), where
platform_dir - abs path to the top of the results tree (or test
tree)
results_filename - relative path from top of tree to the results
file
(port.join() of the two gives you the full path to the file,
unless None was returned.)
Return values will be in the format appropriate for the current
platform (e.g., "\\" for path separators on Windows). If the results
file is not found, then None will be returned for the directory,
but the expected relative pathname will still be returned.
This routine is generic but lives here since it is used in
conjunction with the other baseline and filename routines that are
platform specific.
"""
baseline_filename = self._filesystem.splitext(test_name)[0] + '-expected' + suffix
baseline_search_path = self.baseline_search_path()
baselines = []
for platform_dir in baseline_search_path:
if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if not all_baselines and baselines:
return baselines
# If it wasn't found in a platform directory, return the expected
# result in the test directory, even if no such file actually exists.
platform_dir = self.layout_tests_dir()
if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if baselines:
return baselines
return [(None, baseline_filename)]
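    # Illustrative sketch for expected_baselines() (hypothetical paths, not part
    # of the API): for test_name 'foo/bar.sky' and suffix '.txt', the baseline
    # filename is 'foo/bar-expected.txt'; each directory in
    # baseline_search_path() is probed for that file, and the generic
    # layout_tests_dir() is used as the final fallback.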
def expected_filename(self, test_name, suffix, return_default=True):
"""Given a test name, returns an absolute path to its expected results.
If no expected results are found in any of the searched directories,
the directory in which the test itself is located will be returned.
The return value is in the format appropriate for the platform
(e.g., "\\" for path separators on windows).
Args:
test_name: name of test file (usually a relative path under tests/)
suffix: file suffix of the expected results, including dot; e.g. '.txt'
or '.png'. This should not be None, but may be an empty string.
return_default: if True, returns the path to the generic expectation if nothing
else is found; if False, returns None.
This routine is generic but is implemented here to live alongside
the other baseline and filename manipulation routines.
"""
# FIXME: The [0] here is very mysterious, as is the destructured return.
platform_dir, baseline_filename = self.expected_baselines(test_name, suffix)[0]
if platform_dir:
return self._filesystem.join(platform_dir, baseline_filename)
actual_test_name = self.lookup_virtual_test_base(test_name)
if actual_test_name:
return self.expected_filename(actual_test_name, suffix)
if return_default:
return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
return None
def expected_checksum(self, test_name):
"""Returns the checksum of the image we expect the test to produce, or None if it is a text-only test."""
png_path = self.expected_filename(test_name, '.png')
if self._filesystem.exists(png_path):
with self._filesystem.open_binary_file_for_reading(png_path) as filehandle:
return read_checksum_from_png.read_checksum(filehandle)
return None
def expected_image(self, test_name):
"""Returns the image we expect the test to produce."""
baseline_path = self.expected_filename(test_name, '.png')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_audio(self, test_name):
baseline_path = self.expected_filename(test_name, '.wav')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_text(self, test_name):
"""Returns the text output we expect the test to produce, or None
if we don't expect there to be any text output.
End-of-line characters are normalized to '\n'."""
# FIXME: DRT output is actually utf-8, but since we don't decode the
# output from DRT (instead treating it as a binary string), we read the
# baselines as a binary string, too.
baseline_path = self.expected_filename(test_name, '.txt')
if not self._filesystem.exists(baseline_path):
return None
text = self._filesystem.read_binary_file(baseline_path)
return text.replace("\r\n", "\n")
def _get_reftest_list(self, test_name):
dirname = self._filesystem.join(self.layout_tests_dir(), self._filesystem.dirname(test_name))
if dirname not in self._reftest_list:
self._reftest_list[dirname] = Port._parse_reftest_list(self._filesystem, dirname)
return self._reftest_list[dirname]
@staticmethod
def _parse_reftest_list(filesystem, test_dirpath):
reftest_list_path = filesystem.join(test_dirpath, 'reftest.list')
if not filesystem.isfile(reftest_list_path):
return None
reftest_list_file = filesystem.read_text_file(reftest_list_path)
parsed_list = {}
for line in reftest_list_file.split('\n'):
line = re.sub('#.+$', '', line)
split_line = line.split()
if len(split_line) == 4:
# FIXME: Probably one of mozilla's extensions in the reftest.list format. Do we need to support this?
_log.warning("unsupported reftest.list line '%s' in %s" % (line, reftest_list_path))
continue
if len(split_line) < 3:
continue
expectation_type, test_file, ref_file = split_line
parsed_list.setdefault(filesystem.join(test_dirpath, test_file), []).append((expectation_type, filesystem.join(test_dirpath, ref_file)))
return parsed_list
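    # Illustrative sketch for _parse_reftest_list() (hypothetical entries): a
    # reftest.list line such as '== foo.sky foo-ref.sky' is parsed into
    # {'<dir>/foo.sky': [('==', '<dir>/foo-ref.sky')]}; '#' starts a comment and
    # lines that do not split into exactly three tokens are skipped.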
def reference_files(self, test_name):
"""Return a list of expectation (== or !=) and filename pairs"""
reftest_list = self._get_reftest_list(test_name)
if not reftest_list:
reftest_list = []
for expectation, prefix in (('==', ''), ('!=', '-mismatch')):
                for extension in Port._supported_file_extensions:
                    path = self.expected_filename(test_name, prefix + extension)
if self._filesystem.exists(path):
reftest_list.append((expectation, path))
return reftest_list
return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), []) # pylint: disable=E1103
def tests(self, paths):
"""Return the list of tests found matching paths."""
tests = self._real_tests(paths)
tests.extend(self._virtual_tests(paths, self.populated_virtual_test_suites()))
return tests
def _real_tests(self, paths):
# When collecting test cases, skip these directories
skipped_directories = set(['.svn', '_svn', 'platform', 'resources', 'support', 'script-tests', 'reference', 'reftest', 'conf'])
files = find_files.find(self._filesystem, self.layout_tests_dir(), paths, skipped_directories, Port.is_test_file, self.test_key)
return [self.relative_test_filename(f) for f in files]
# When collecting test cases, we include any file with these extensions.
_supported_file_extensions = set(['.sky', '.dart'])
@staticmethod
# If any changes are made here be sure to update the isUsedInReftest method in old-run-webkit-tests as well.
def is_reference_html_file(filesystem, dirname, filename):
if filename.startswith('ref-') or filename.startswith('notref-'):
return True
        filename_without_ext, unused = filesystem.splitext(filename)
        for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
            if filename_without_ext.endswith(suffix):
return True
return False
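    # Illustrative sketch for is_reference_html_file() (hypothetical names):
    # 'ref-box.sky' and 'box-expected.sky' are treated as reference files and
    # therefore excluded from test collection, while 'box.sky' is not.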
@staticmethod
def _has_supported_extension(filesystem, filename):
"""Return true if filename is one of the file extensions we want to run a test on."""
extension = filesystem.splitext(filename)[1]
return extension in Port._supported_file_extensions
@staticmethod
def is_test_file(filesystem, dirname, filename):
return Port._has_supported_extension(filesystem, filename) and not Port.is_reference_html_file(filesystem, dirname, filename)
ALL_TEST_TYPES = ['audio', 'harness', 'pixel', 'ref', 'text', 'unknown']
def test_type(self, test_name):
fs = self._filesystem
if fs.exists(self.expected_filename(test_name, '.png')):
return 'pixel'
if fs.exists(self.expected_filename(test_name, '.wav')):
return 'audio'
if self.reference_files(test_name):
return 'ref'
txt = self.expected_text(test_name)
if txt:
if 'layer at (0,0) size 800x600' in txt:
return 'pixel'
for line in txt.splitlines():
if line.startswith('FAIL') or line.startswith('TIMEOUT') or line.startswith('PASS'):
return 'harness'
return 'text'
return 'unknown'
def test_key(self, test_name):
"""Turns a test name into a list with two sublists, the natural key of the
dirname, and the natural key of the basename.
        This can be used when sorting paths so that files in a directory
        are kept together rather than being mixed in with files in
subdirectories."""
dirname, basename = self.split_test(test_name)
return (self._natural_sort_key(dirname + self.TEST_PATH_SEPARATOR), self._natural_sort_key(basename))
def _natural_sort_key(self, string_to_split):
""" Turns a string into a list of string and number chunks, i.e. "z23a" -> ["z", 23, "a"]
This can be used to implement "natural sort" order. See:
http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
http://nedbatchelder.com/blog/200712.html#e20071211T054956
"""
def tryint(val):
try:
return int(val)
except ValueError:
return val
return [tryint(chunk) for chunk in re.split('(\d+)', string_to_split)]
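    # Illustrative sketch for _natural_sort_key() (assuming 'port' is a Port
    # instance): sorted(['t10.sky', 't2.sky'], key=port._natural_sort_key)
    # yields ['t2.sky', 't10.sky'] because numeric chunks compare as integers.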
def test_dirs(self):
"""Returns the list of top-level test directories."""
layout_tests_dir = self.layout_tests_dir()
return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)),
self._filesystem.listdir(layout_tests_dir))
@memoized
def test_isfile(self, test_name):
"""Return True if the test name refers to a directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isfile(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isfile(self.abspath_for_test(base))
@memoized
def test_isdir(self, test_name):
"""Return True if the test name refers to a directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isdir(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isdir(self.abspath_for_test(base))
@memoized
def test_exists(self, test_name):
"""Return True if the test name refers to an existing test or baseline."""
# Used by test_expectations.py to determine if an entry refers to a
# valid test and by printing.py to determine if baselines exist.
return self.test_isfile(test_name) or self.test_isdir(test_name)
def split_test(self, test_name):
"""Splits a test name into the 'directory' part and the 'basename' part."""
index = test_name.rfind(self.TEST_PATH_SEPARATOR)
if index < 1:
return ('', test_name)
return (test_name[0:index], test_name[index:])
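    # Illustrative sketch for split_test() (hypothetical name):
    # split_test('css/box.sky') returns ('css', '/box.sky'); note that the
    # basename keeps the leading separator. A name without a separator comes
    # back unchanged as ('', name).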
def normalize_test_name(self, test_name):
"""Returns a normalized version of the test name or test directory."""
if test_name.endswith('/'):
return test_name
if self.test_isdir(test_name):
return test_name + '/'
return test_name
def driver_cmd_line(self):
"""Prints the DRT command line that will be used."""
driver = self.create_driver(0)
return driver.cmd_line(self.get_option('pixel_tests'), [])
def update_baseline(self, baseline_path, data):
"""Updates the baseline for a test.
Args:
baseline_path: the actual path to use for baseline, not the path to
the test. This function is used to update either generic or
platform-specific baselines, but we can't infer which here.
data: contents of the baseline.
"""
self._filesystem.write_binary_file(baseline_path, data)
# FIXME: update callers to create a finder and call it instead of these next five routines (which should be protected).
def webkit_base(self):
return self._webkit_finder.webkit_base()
def path_from_webkit_base(self, *comps):
return self._webkit_finder.path_from_webkit_base(*comps)
def path_from_chromium_base(self, *comps):
return self._webkit_finder.path_from_chromium_base(*comps)
def path_to_script(self, script_name):
return self._webkit_finder.path_to_script(script_name)
def layout_tests_dir(self):
return self._webkit_finder.layout_tests_dir()
def perf_tests_dir(self):
return self._webkit_finder.perf_tests_dir()
def skipped_layout_tests(self, test_list):
"""Returns tests skipped outside of the TestExpectations files."""
return set(self._skipped_tests_for_unsupported_features(test_list))
def _tests_from_skipped_file_contents(self, skipped_file_contents):
tests_to_skip = []
for line in skipped_file_contents.split('\n'):
line = line.strip()
line = line.rstrip('/') # Best to normalize directory names to not include the trailing slash.
if line.startswith('#') or not len(line):
continue
tests_to_skip.append(line)
return tests_to_skip
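    # Illustrative sketch for _tests_from_skipped_file_contents() (hypothetical
    # Skipped file): the lines '# temporarily broken', 'fast/forms/' and
    # 'fast/dom/gc.sky' yield ['fast/forms', 'fast/dom/gc.sky']; comments and
    # blank lines are dropped and trailing slashes are normalized away.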
def _expectations_from_skipped_files(self, skipped_file_paths):
tests_to_skip = []
for search_path in skipped_file_paths:
filename = self._filesystem.join(self._webkit_baseline_path(search_path), "Skipped")
if not self._filesystem.exists(filename):
_log.debug("Skipped does not exist: %s" % filename)
continue
_log.debug("Using Skipped file: %s" % filename)
skipped_file_contents = self._filesystem.read_text_file(filename)
tests_to_skip.extend(self._tests_from_skipped_file_contents(skipped_file_contents))
return tests_to_skip
@memoized
def skipped_perf_tests(self):
return self._expectations_from_skipped_files([self.perf_tests_dir()])
def skips_perf_test(self, test_name):
for test_or_category in self.skipped_perf_tests():
if test_or_category == test_name:
return True
category = self._filesystem.join(self.perf_tests_dir(), test_or_category)
if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
return True
return False
def is_chromium(self):
return True
def name(self):
"""Returns a name that uniquely identifies this particular type of port
(e.g., "mac-snowleopard" or "linux-x86_x64" and can be passed
to factory.get() to instantiate the port."""
return self._name
def operating_system(self):
# Subclasses should override this default implementation.
return 'mac'
def version(self):
"""Returns a string indicating the version of a given platform, e.g.
'leopard' or 'xp'.
This is used to help identify the exact port when parsing test
expectations, determining search paths, and logging information."""
return self._version
def architecture(self):
return self._architecture
def get_option(self, name, default_value=None):
return getattr(self._options, name, default_value)
def set_option_default(self, name, default_value):
return self._options.ensure_value(name, default_value)
@memoized
def path_to_generic_test_expectations_file(self):
return self._filesystem.join(self.layout_tests_dir(), 'TestExpectations')
def relative_test_filename(self, filename):
"""Returns a test_name a relative unix-style path for a filename under the tests
directory. Ports may legitimately return abspaths here if no relpath makes sense."""
# Ports that run on windows need to override this method to deal with
# filenames with backslashes in them.
if filename.startswith(self.layout_tests_dir()):
return self.host.filesystem.relpath(filename, self.layout_tests_dir())
else:
return self.host.filesystem.abspath(filename)
@memoized
def abspath_for_test(self, test_name):
"""Returns the full path to the file for a given test name. This is the
inverse of relative_test_filename()."""
return self._filesystem.join(self.layout_tests_dir(), test_name)
def results_directory(self):
"""Absolute path to the place to store the test results (uses --results-directory)."""
if not self._results_directory:
option_val = self.get_option('results_directory') or self.default_results_directory()
self._results_directory = self._filesystem.abspath(option_val)
return self._results_directory
def perf_results_directory(self):
return self._build_path()
def analyzer_build_directory(self):
return self._build_path()
def default_results_directory(self):
"""Absolute path to the default place to store the test results."""
return self._build_path('layout-test-results')
def setup_test_run(self):
"""Perform port-specific work at the beginning of a test run."""
# Delete the disk cache if any to ensure a clean test run.
dump_render_tree_binary_path = self._path_to_driver()
cachedir = self._filesystem.dirname(dump_render_tree_binary_path)
cachedir = self._filesystem.join(cachedir, "cache")
if self._filesystem.exists(cachedir):
self._filesystem.rmtree(cachedir)
if self._dump_reader:
self._filesystem.maybe_make_directory(self._dump_reader.crash_dumps_directory())
def num_workers(self, requested_num_workers):
"""Returns the number of available workers (possibly less than the number requested)."""
return requested_num_workers
def clean_up_test_run(self):
"""Perform port-specific work at the end of a test run."""
if self._image_differ:
self._image_differ.stop()
self._image_differ = None
# FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
def _value_or_default_from_environ(self, name, default=None):
if name in os.environ:
return os.environ[name]
return default
def _copy_value_from_environ_if_set(self, clean_env, name):
if name in os.environ:
clean_env[name] = os.environ[name]
def setup_environ_for_server(self, server_name=None):
# We intentionally copy only a subset of os.environ when
# launching subprocesses to ensure consistent test results.
clean_env = {
'LOCAL_RESOURCE_ROOT': self.layout_tests_dir(), # FIXME: Is this used?
}
variables_to_copy = [
'WEBKIT_TESTFONTS', # FIXME: Is this still used?
'WEBKITOUTPUTDIR', # FIXME: Is this still used?
'CHROME_DEVEL_SANDBOX',
'CHROME_IPC_LOGGING',
'ASAN_OPTIONS',
'VALGRIND_LIB',
'VALGRIND_LIB_INNER',
]
if self.host.platform.is_linux() or self.host.platform.is_freebsd():
variables_to_copy += [
'XAUTHORITY',
'HOME',
'LANG',
'LD_LIBRARY_PATH',
'DBUS_SESSION_BUS_ADDRESS',
'XDG_DATA_DIRS',
]
clean_env['DISPLAY'] = self._value_or_default_from_environ('DISPLAY', ':1')
if self.host.platform.is_mac():
clean_env['DYLD_LIBRARY_PATH'] = self._build_path()
clean_env['DYLD_FRAMEWORK_PATH'] = self._build_path()
variables_to_copy += [
'HOME',
]
if self.host.platform.is_win():
variables_to_copy += [
'PATH',
'GYP_DEFINES', # Required to locate win sdk.
]
if self.host.platform.is_cygwin():
variables_to_copy += [
'HOMEDRIVE',
'HOMEPATH',
'_NT_SYMBOL_PATH',
]
for variable in variables_to_copy:
self._copy_value_from_environ_if_set(clean_env, variable)
for string_variable in self.get_option('additional_env_var', []):
[name, value] = string_variable.split('=', 1)
clean_env[name] = value
return clean_env
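    # Illustrative sketch for setup_environ_for_server() (hypothetical variable):
    # an 'additional_env_var' entry such as 'LANGUAGE=en_US' is split on the
    # first '=' and added as clean_env['LANGUAGE'] = 'en_US'; only the
    # whitelisted variables above are copied from os.environ.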
def show_results_html_file(self, results_filename):
"""This routine should display the HTML file pointed at by
results_filename in a users' browser."""
return self.host.user.open_url(path.abspath_to_uri(self.host.platform, results_filename))
def create_driver(self, worker_number, no_timeout=False):
"""Return a newly created Driver subclass for starting/stopping the test driver."""
return self._driver_class()(self, worker_number, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
def start_helper(self):
"""If a port needs to reconfigure graphics settings or do other
things to ensure a known test configuration, it should override this
method."""
helper_path = self._path_to_helper()
if helper_path:
_log.debug("Starting layout helper %s" % helper_path)
# Note: Not thread safe: http://bugs.python.org/issue2320
self._helper = self._executive.popen([helper_path],
stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=None)
is_ready = self._helper.stdout.readline()
if not is_ready.startswith('ready'):
_log.error("layout_test_helper failed to be ready")
def requires_sky_server(self):
"""Does the port require an HTTP server for running tests? This could
be the case when the tests aren't run on the host platform."""
return True
def _dart_packages_root(self):
return self.path_from_chromium_base('sky', 'packages', 'workbench', 'packages')
def start_sky_server(self, additional_dirs, number_of_drivers):
"""Start a web server. Raise an error if it can't start or is already running.
Ports can stub this out if they don't need a web server to be running."""
assert not self._sky_server, 'Already running an http server.'
self._sky_server = SkyServer(8000, self.path_from_chromium_base(), self._dart_packages_root())
self._sky_server.start()
def start_websocket_server(self):
"""Start a web server. Raise an error if it can't start or is already running.
Ports can stub this out if they don't need a websocket server to be running."""
assert not self._websocket_server, 'Already running a websocket server.'
server = pywebsocket.PyWebSocket(self, self.results_directory())
server.start()
self._websocket_server = server
def http_server_supports_ipv6(self):
# Apache < 2.4 on win32 does not support IPv6, nor does cygwin apache.
if self.host.platform.is_cygwin() or self.host.platform.is_win():
return False
return True
def stop_helper(self):
"""Shut down the test helper if it is running. Do nothing if
it isn't, or it isn't available. If a port overrides start_helper()
it must override this routine as well."""
if self._helper:
_log.debug("Stopping layout test helper")
try:
self._helper.stdin.write("x\n")
self._helper.stdin.close()
self._helper.wait()
except IOError, e:
pass
finally:
self._helper = None
def stop_sky_server(self):
"""Shut down the Http server if it is running. Do nothing if it isn't."""
if self._sky_server:
self._sky_server.stop()
self._sky_server = None
def stop_websocket_server(self):
"""Shut down the websocket server if it is running. Do nothing if it isn't."""
if self._websocket_server:
self._websocket_server.stop()
self._websocket_server = None
#
# TEST EXPECTATION-RELATED METHODS
#
def test_configuration(self):
"""Returns the current TestConfiguration for the port."""
if not self._test_configuration:
gn_args = self._executive.run_command([
'gn', 'args',
self._build_path_with_configuration(self._options.configuration),
'--list', '--short'])
if 'is_debug = true' in gn_args:
configuration = 'debug'
else:
configuration = 'release'
self._test_configuration = TestConfiguration(self._version, self._architecture, configuration)
return self._test_configuration
# FIXME: Belongs on a Platform object.
@memoized
def all_test_configurations(self):
"""Returns a list of TestConfiguration instances, representing all available
test configurations for this port."""
return self._generate_all_test_configurations()
# FIXME: Belongs on a Platform object.
def configuration_specifier_macros(self):
"""Ports may provide a way to abbreviate configuration specifiers to conveniently
refer to them as one term or alias specific values to more generic ones. For example:
(xp, vista, win7) -> win # Abbreviate all Windows versions into one namesake.
(lucid) -> linux # Change specific name of the Linux distro to a more generic term.
Returns a dictionary, each key representing a macro term ('win', for example),
and value being a list of valid configuration specifiers (such as ['xp', 'vista', 'win7'])."""
return self.CONFIGURATION_SPECIFIER_MACROS
def all_baseline_variants(self):
"""Returns a list of platform names sufficient to cover all the baselines.
The list should be sorted so that a later platform will reuse
an earlier platform's baselines if they are the same (e.g.,
'snowleopard' should precede 'leopard')."""
return self.ALL_BASELINE_VARIANTS
def _generate_all_test_configurations(self):
"""Returns a sequence of the TestConfigurations the port supports."""
# By default, we assume we want to test every graphics type in
# every configuration on every system.
test_configurations = []
for version, architecture in self.ALL_SYSTEMS:
for build_type in self.ALL_BUILD_TYPES:
test_configurations.append(TestConfiguration(version, architecture, build_type))
return test_configurations
try_builder_names = frozenset([
'linux_layout',
'mac_layout',
'win_layout',
'linux_layout_rel',
'mac_layout_rel',
'win_layout_rel',
])
def warn_if_bug_missing_in_test_expectations(self):
return True
def _port_specific_expectations_files(self):
return []
def expectations_dict(self):
"""Returns an OrderedDict of name -> expectations strings.
The names are expected to be (but not required to be) paths in the filesystem.
If the name is a path, the file can be considered updatable for things like rebaselining,
so don't use names that are paths if they're not paths.
Generally speaking the ordering should be files in the filesystem in cascade order
(TestExpectations followed by Skipped, if the port honors both formats),
then any built-in expectations (e.g., from compile-time exclusions), then --additional-expectations options."""
# FIXME: rename this to test_expectations() once all the callers are updated to know about the ordered dict.
expectations = OrderedDict()
for path in self.expectations_files():
if self._filesystem.exists(path):
expectations[path] = self._filesystem.read_text_file(path)
for path in self.get_option('additional_expectations', []):
expanded_path = self._filesystem.expanduser(path)
if self._filesystem.exists(expanded_path):
_log.debug("reading additional_expectations from path '%s'" % path)
expectations[path] = self._filesystem.read_text_file(expanded_path)
else:
_log.warning("additional_expectations path '%s' does not exist" % path)
return expectations
def bot_expectations(self):
if not self.get_option('ignore_flaky_tests'):
return {}
full_port_name = self.determine_full_port_name(self.host, self._options, self.port_name)
builder_category = self.get_option('ignore_builder_category', 'layout')
factory = BotTestExpectationsFactory()
# FIXME: This only grabs release builder's flakiness data. If we're running debug,
        # we should grab the debug builder's data instead.
expectations = factory.expectations_for_port(full_port_name, builder_category)
if not expectations:
return {}
ignore_mode = self.get_option('ignore_flaky_tests')
if ignore_mode == 'very-flaky' or ignore_mode == 'maybe-flaky':
return expectations.flakes_by_path(ignore_mode == 'very-flaky')
if ignore_mode == 'unexpected':
return expectations.unexpected_results_by_path()
_log.warning("Unexpected ignore mode: '%s'." % ignore_mode)
return {}
def expectations_files(self):
return [self.path_to_generic_test_expectations_file()] + self._port_specific_expectations_files()
def repository_paths(self):
"""Returns a list of (repository_name, repository_path) tuples of its depending code base."""
return [('blink', self.layout_tests_dir()),
('chromium', self.path_from_chromium_base('build'))]
_WDIFF_DEL = '##WDIFF_DEL##'
_WDIFF_ADD = '##WDIFF_ADD##'
_WDIFF_END = '##WDIFF_END##'
def _format_wdiff_output_as_html(self, wdiff):
wdiff = cgi.escape(wdiff)
wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>")
wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>")
wdiff = wdiff.replace(self._WDIFF_END, "</span>")
html = "<head><style>.del { background: #faa; } "
html += ".add { background: #afa; }</style></head>"
html += "<pre>%s</pre>" % wdiff
return html
def _wdiff_command(self, actual_filename, expected_filename):
executable = self._path_to_wdiff()
return [executable,
"--start-delete=%s" % self._WDIFF_DEL,
"--end-delete=%s" % self._WDIFF_END,
"--start-insert=%s" % self._WDIFF_ADD,
"--end-insert=%s" % self._WDIFF_END,
actual_filename,
expected_filename]
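    # Illustrative sketch for _wdiff_command() (hypothetical paths): for
    # 'actual.txt' and 'expected.txt' the resulting command is roughly
    #   wdiff --start-delete=##WDIFF_DEL## --end-delete=##WDIFF_END##
    #         --start-insert=##WDIFF_ADD## --end-insert=##WDIFF_END##
    #         actual.txt expected.txt
    # and the markers are later rewritten into <span> tags by
    # _format_wdiff_output_as_html().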
@staticmethod
def _handle_wdiff_error(script_error):
# Exit 1 means the files differed, any other exit code is an error.
if script_error.exit_code != 1:
raise script_error
def _run_wdiff(self, actual_filename, expected_filename):
"""Runs wdiff and may throw exceptions.
This is mostly a hook for unit testing."""
# Diffs are treated as binary as they may include multiple files
# with conflicting encodings. Thus we do not decode the output.
command = self._wdiff_command(actual_filename, expected_filename)
wdiff = self._executive.run_command(command, decode_output=False,
error_handler=self._handle_wdiff_error)
return self._format_wdiff_output_as_html(wdiff)
_wdiff_error_html = "Failed to run wdiff, see error log."
def wdiff_text(self, actual_filename, expected_filename):
"""Returns a string of HTML indicating the word-level diff of the
contents of the two filenames. Returns an empty string if word-level
diffing isn't available."""
if not self.wdiff_available():
return ""
try:
# It's possible to raise a ScriptError if we pass wdiff invalid paths.
return self._run_wdiff(actual_filename, expected_filename)
except OSError as e:
if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
# Silently ignore cases where wdiff is missing.
self._wdiff_available = False
return ""
raise
except ScriptError as e:
_log.error("Failed to run wdiff: %s" % e)
self._wdiff_available = False
return self._wdiff_error_html
# This is a class variable so we can test error output easily.
_pretty_patch_error_html = "Failed to run PrettyPatch, see error log."
def pretty_patch_text(self, diff_path):
if self._pretty_patch_available is None:
self._pretty_patch_available = self.check_pretty_patch(logging=False)
if not self._pretty_patch_available:
return self._pretty_patch_error_html
command = ("ruby", "-I", self._filesystem.dirname(self._pretty_patch_path),
self._pretty_patch_path, diff_path)
try:
# Diffs are treated as binary (we pass decode_output=False) as they
# may contain multiple files of conflicting encodings.
return self._executive.run_command(command, decode_output=False)
except OSError as e:
# If the system is missing ruby log the error and stop trying.
self._pretty_patch_available = False
_log.error("Failed to run PrettyPatch (%s): %s" % (command, e))
return self._pretty_patch_error_html
except ScriptError as e:
# If ruby failed to run for some reason, log the command
# output and stop trying.
self._pretty_patch_available = False
_log.error("Failed to run PrettyPatch (%s):\n%s" % (command, e.message_with_output()))
return self._pretty_patch_error_html
def default_configuration(self):
return self._config.default_configuration()
def clobber_old_port_specific_results(self):
pass
# FIXME: This does not belong on the port object.
@memoized
def path_to_apache(self):
"""Returns the full path to the apache binary.
This is needed only by ports that use the apache_http_server module."""
raise NotImplementedError('Port.path_to_apache')
def path_to_apache_config_file(self):
"""Returns the full path to the apache configuration file.
If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
contents will be used instead.
This is needed only by ports that use the apache_http_server module."""
config_file_path = os.environ.get('WEBKIT_HTTP_SERVER_CONF_PATH')
if not config_file_path:
config_file_name = self._apache_config_file_name_for_platform(sys.platform)
config_file_path = self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)
if not self._filesystem.exists(config_file_path):
raise IOError('%s was not found on the system' % config_file_path)
return config_file_path
#
# PROTECTED ROUTINES
#
# The routines below should only be called by routines in this class
# or any of its subclasses.
#
# FIXME: This belongs on some platform abstraction instead of Port.
def _is_redhat_based(self):
return self._filesystem.exists('/etc/redhat-release')
def _is_debian_based(self):
return self._filesystem.exists('/etc/debian_version')
def _apache_version(self):
config = self._executive.run_command([self.path_to_apache(), '-v'])
return re.sub(r'(?:.|\n)*Server version: Apache/(\d+\.\d+)(?:.|\n)*', r'\1', config)
# We pass sys_platform into this method to make it easy to unit test.
def _apache_config_file_name_for_platform(self, sys_platform):
if sys_platform == 'cygwin':
return 'cygwin-httpd.conf' # CYGWIN is the only platform to still use Apache 1.3.
if sys_platform.startswith('linux'):
if self._is_redhat_based():
return 'fedora-httpd-' + self._apache_version() + '.conf'
if self._is_debian_based():
return 'debian-httpd-' + self._apache_version() + '.conf'
# All platforms use apache2 except for CYGWIN (and Mac OS X Tiger and prior, which we no longer support).
return "apache2-httpd.conf"
def _path_to_driver(self, configuration=None):
"""Returns the full path to the test driver."""
return self._build_path(self.driver_name())
def _path_to_webcore_library(self):
"""Returns the full path to a built copy of WebCore."""
return None
def _path_to_image_diff(self):
"""Returns the full path to the image_diff binary, or None if it is not available.
This is likely used only by diff_image()"""
return self._build_path('image_diff')
@memoized
def _path_to_wdiff(self):
"""Returns the full path to the wdiff binary, or None if it is not available.
This is likely used only by wdiff_text()"""
for path in ("/usr/bin/wdiff", "/usr/bin/dwdiff"):
if self._filesystem.exists(path):
return path
return None
def _webkit_baseline_path(self, platform):
"""Return the full path to the top of the baseline tree for a
given platform."""
return self._filesystem.join(self.layout_tests_dir(), 'platform', platform)
def _driver_class(self):
"""Returns the port's driver implementation."""
return driver.Driver
def _output_contains_sanitizer_messages(self, output):
if not output:
return None
if 'AddressSanitizer' in output:
return 'AddressSanitizer'
if 'MemorySanitizer' in output:
return 'MemorySanitizer'
return None
def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
if self._output_contains_sanitizer_messages(stderr):
# Running the symbolizer script can take a lot of memory, so we need to
# serialize access to it across all the concurrently running drivers.
# FIXME: investigate using LLVM_SYMBOLIZER_PATH here to reduce the overhead.
sanitizer_filter_path = self.path_from_chromium_base('tools', 'valgrind', 'asan', 'asan_symbolize.py')
sanitizer_strip_path_prefix = 'Release/../../'
if self._filesystem.exists(sanitizer_filter_path):
stderr = self._executive.run_command(['flock', sys.executable, sanitizer_filter_path, sanitizer_strip_path_prefix], input=stderr, decode_output=False)
name_str = name or '<unknown process name>'
pid_str = str(pid or '<unknown>')
stdout_lines = (stdout or '<empty>').decode('utf8', 'replace').splitlines()
stderr_lines = (stderr or '<empty>').decode('utf8', 'replace').splitlines()
return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
'\n'.join(('STDOUT: ' + l) for l in stdout_lines),
'\n'.join(('STDERR: ' + l) for l in stderr_lines)))
def look_for_new_crash_logs(self, crashed_processes, start_time):
pass
def look_for_new_samples(self, unresponsive_processes, start_time):
pass
def sample_process(self, name, pid):
pass
def physical_test_suites(self):
return [
# For example, to turn on force-compositing-mode in the svg/ directory:
# PhysicalTestSuite('svg',
# ['--force-compositing-mode']),
]
def virtual_test_suites(self):
return [
VirtualTestSuite('gpu',
'fast/canvas',
['--enable-accelerated-2d-canvas']),
VirtualTestSuite('gpu',
'canvas/philip',
['--enable-accelerated-2d-canvas']),
VirtualTestSuite('threaded',
'compositing/visibility',
['--enable-threaded-compositing']),
VirtualTestSuite('threaded',
'compositing/webgl',
['--enable-threaded-compositing']),
VirtualTestSuite('deferred',
'fast/images',
['--enable-deferred-image-decoding',
'--enable-per-tile-painting']),
VirtualTestSuite('deferred',
'inspector/timeline',
['--enable-deferred-image-decoding',
'--enable-per-tile-painting']),
VirtualTestSuite('deferred',
'inspector/tracing',
['--enable-deferred-image-decoding',
'--enable-per-tile-painting']),
VirtualTestSuite('gpu/compositedscrolling/overflow',
'compositing/overflow',
['--enable-prefer-compositing-to-lcd-text'],
use_legacy_naming=True),
VirtualTestSuite('gpu/compositedscrolling/scrollbars',
'scrollbars',
['--enable-prefer-compositing-to-lcd-text'],
use_legacy_naming=True),
VirtualTestSuite('threaded',
'animations',
['--enable-threaded-compositing']),
VirtualTestSuite('threaded',
'transitions',
['--enable-threaded-compositing']),
VirtualTestSuite('stable',
'webexposed',
['--stable-release-mode']),
VirtualTestSuite('stable',
'animations-unprefixed',
['--stable-release-mode']),
VirtualTestSuite('stable',
'media/stable',
['--stable-release-mode']),
VirtualTestSuite('android',
'fullscreen',
['--enable-threaded-compositing',
'--enable-fixed-position-compositing', '--enable-prefer-compositing-to-lcd-text',
'--enable-composited-scrolling-for-frames', '--enable-gesture-tap-highlight', '--enable-pinch',
'--enable-overlay-fullscreen-video', '--enable-overlay-scrollbars', '--enable-overscroll-notifications',
'--enable-fixed-layout', '--enable-viewport', '--disable-canvas-aa',
'--disable-composited-antialiasing']),
VirtualTestSuite('implsidepainting',
'inspector/timeline',
['--enable-threaded-compositing', '--enable-impl-side-painting']),
VirtualTestSuite('implsidepainting',
'inspector/tracing',
['--enable-threaded-compositing', '--enable-impl-side-painting']),
VirtualTestSuite('stable',
'fast/css3-text/css3-text-decoration/stable',
['--stable-release-mode']),
VirtualTestSuite('stable',
'web-animations-api',
['--stable-release-mode']),
VirtualTestSuite('linux-subpixel',
'platform/linux/fast/text/subpixel',
['--enable-webkit-text-subpixel-positioning']),
VirtualTestSuite('antialiasedtext',
'fast/text',
['--enable-direct-write',
'--enable-font-antialiasing']),
VirtualTestSuite('threaded',
'printing',
['--enable-threaded-compositing']),
VirtualTestSuite('regionbasedmulticol',
'fast/multicol',
['--enable-region-based-columns']),
VirtualTestSuite('regionbasedmulticol',
'fast/pagination',
['--enable-region-based-columns']),
]
@memoized
def populated_virtual_test_suites(self):
suites = self.virtual_test_suites()
# Sanity-check the suites to make sure they don't point to other suites.
suite_dirs = [suite.name for suite in suites]
for suite in suites:
assert suite.base not in suite_dirs
for suite in suites:
base_tests = self._real_tests([suite.base])
suite.tests = {}
for test in base_tests:
suite.tests[test.replace(suite.base, suite.name, 1)] = test
return suites
def _virtual_tests(self, paths, suites):
virtual_tests = list()
for suite in suites:
if paths:
for test in suite.tests:
if any(test.startswith(p) for p in paths):
virtual_tests.append(test)
else:
virtual_tests.extend(suite.tests.keys())
return virtual_tests
def is_virtual_test(self, test_name):
return bool(self.lookup_virtual_suite(test_name))
def lookup_virtual_suite(self, test_name):
for suite in self.populated_virtual_test_suites():
if test_name.startswith(suite.name):
return suite
return None
def lookup_virtual_test_base(self, test_name):
suite = self.lookup_virtual_suite(test_name)
if not suite:
return None
return test_name.replace(suite.name, suite.base, 1)
def lookup_virtual_test_args(self, test_name):
for suite in self.populated_virtual_test_suites():
if test_name.startswith(suite.name):
return suite.args
return []
def lookup_physical_test_args(self, test_name):
for suite in self.physical_test_suites():
if test_name.startswith(suite.name):
return suite.args
return []
def should_run_as_pixel_test(self, test_input):
if not self._options.pixel_tests:
return False
if self._options.pixel_test_directories:
return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
return True
def _modules_to_search_for_symbols(self):
path = self._path_to_webcore_library()
if path:
return [path]
return []
def _symbols_string(self):
symbols = ''
for path_to_module in self._modules_to_search_for_symbols():
try:
symbols += self._executive.run_command(['nm', path_to_module], error_handler=self._executive.ignore_error)
except OSError as e:
_log.warn("Failed to run nm: %s. Can't determine supported features correctly." % e)
return symbols
# Ports which use compile-time feature detection should define this method and return
# a dictionary mapping from symbol substrings to possibly disabled test directories.
# When the symbol substrings are not matched, the directories will be skipped.
# If ports don't ever enable certain features, then those directories can just be
# in the Skipped list instead of compile-time-checked here.
def _missing_symbol_to_skipped_tests(self):
if self.PORT_HAS_AUDIO_CODECS_BUILT_IN:
return {}
else:
return {
"ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
"ff_aac_decoder": ["webaudio/codec-tests/aac"],
}
def _has_test_in_directories(self, directory_lists, test_list):
if not test_list:
return False
directories = itertools.chain.from_iterable(directory_lists)
for directory, test in itertools.product(directories, test_list):
if test.startswith(directory):
return True
return False
def _skipped_tests_for_unsupported_features(self, test_list):
# Only check the symbols if there are tests in the test_list that might get skipped.
# This is a performance optimization to avoid calling nm.
# Runtime feature detection is not supported, so fall back to static detection:
# Disable any tests for symbols missing from the executable or libraries.
if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
symbols_string = self._symbols_string()
if symbols_string is not None:
return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
return []
def _convert_path(self, path):
"""Handles filename conversion for subprocess command line args."""
# See note above in diff_image() for why we need this.
if sys.platform == 'cygwin':
return cygpath(path)
return path
def gen_dir(self):
return self._build_path("gen")
def _build_path(self, *comps):
return self._build_path_with_configuration(None, *comps)
def _build_path_with_configuration(self, configuration, *comps):
# Note that we don't do the option caching that the
# base class does, because finding the right directory is relatively
# fast.
configuration = configuration or self.get_option('configuration')
return self._static_build_path(self._filesystem, self.get_option('build_directory'),
self.path_from_chromium_base(), configuration, comps)
def _check_driver_build_up_to_date(self, configuration):
if configuration in ('Debug', 'Release'):
try:
debug_path = self._path_to_driver('Debug')
release_path = self._path_to_driver('Release')
debug_mtime = self._filesystem.mtime(debug_path)
release_mtime = self._filesystem.mtime(release_path)
if (debug_mtime > release_mtime and configuration == 'Release' or
release_mtime > debug_mtime and configuration == 'Debug'):
most_recent_binary = 'Release' if configuration == 'Debug' else 'Debug'
_log.warning('You are running the %s binary. However the %s binary appears to be more recent. '
'Please pass --%s.', configuration, most_recent_binary, most_recent_binary.lower())
_log.warning('')
# This will fail if we don't have both a debug and release binary.
# That's fine because, in this case, we must already be running the
# most up-to-date one.
except OSError:
pass
return True
def _chromium_baseline_path(self, platform):
if platform is None:
platform = self.name()
return self.path_from_webkit_base('tests', 'platform', platform)
class VirtualTestSuite(object):
def __init__(self, name, base, args, use_legacy_naming=False, tests=None):
if use_legacy_naming:
self.name = 'virtual/' + name
else:
if name.find('/') != -1:
_log.error("Virtual test suites names cannot contain /'s: %s" % name)
return
self.name = 'virtual/' + name + '/' + base
self.base = base
self.args = args
self.tests = tests or set()
def __repr__(self):
return "VirtualTestSuite('%s', '%s', %s)" % (self.name, self.base, self.args)
class PhysicalTestSuite(object):
def __init__(self, base, args):
self.name = base
self.base = base
self.args = args
self.tests = set()
def __repr__(self):
return "PhysicalTestSuite('%s', '%s', %s)" % (self.name, self.base, self.args)
| 43.986827 | 189 | 0.642179 |
fe7ec3f5152913a84a02cb96cc876646ef913e05 | 4,204 | py | Python | etc/scripts/licenses/golic.py | s4-2/scancode-toolkit | 8931b42e2630b94d0cabc834dfb3c16f01f82321 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1,511 | 2015-07-01T15:29:03.000Z | 2022-03-30T13:40:05.000Z | etc/scripts/licenses/golic.py | s4-2/scancode-toolkit | 8931b42e2630b94d0cabc834dfb3c16f01f82321 | [
"Apache-2.0",
"CC-BY-4.0"
] | 2,695 | 2015-07-01T16:01:35.000Z | 2022-03-31T19:17:44.000Z | etc/scripts/licenses/golic.py | s4-2/scancode-toolkit | 8931b42e2630b94d0cabc834dfb3c16f01f82321 | [
"Apache-2.0",
"CC-BY-4.0"
] | 540 | 2015-07-01T15:08:19.000Z | 2022-03-31T12:13:11.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
import attr
import saneyaml
from commoncode.text import python_safe_name
from licensedcode.cache import get_spdx_symbols
@attr.s
class Test(object):
location = attr.ib(None)
filename = attr.ib(None)
license_key = attr.ib(default=None)
text = attr.ib(default=None)
coverage = attr.ib(default=0)
notes = attr.ib(default=None)
def collect_tests(location):
for filename in os.listdir(location):
if filename in ('README', 'ccurls.t1'):
continue
loc = os.path.join(location, filename)
data = open(loc).read()
header, _, text = data.partition('\n\n')
expected_lic = None
coverage = 0
comments = []
for line in header.splitlines(False):
if line.startswith('#'):
comments.append(line.strip('#'))
elif line.endswith('%'):
coverage = float(line.strip('%'))
else:
expected_lic, _, _ = line.partition(' ')
expected_lic = expected_lic.strip()
test = Test(
location=loc,
filename=filename,
coverage=coverage,
license_key=expected_lic,
text=text,
notes='\n'.join(c.strip() for c in comments),
)
yield test
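# Illustrative sketch (not part of the original script) of the header layout the
# parser above expects, inferred from its logic: optional comment lines starting
# with '#', an optional coverage line ending in '%', and a line whose first word
# is the expected license key, all separated from the license text by a blank line.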
def collect_url_tests(location):
# the urls file 'ccurls.t1' is special
ccurl = 'ccurls.t1'
data = open(os.path.join(location, '..', ccurl)).read()
lics, _, urls = data.partition('\n\n')
lics = (e for e in lics.splitlines(False) if not e.endswith('%'))
for i, lic in enumerate(lics):
expected_lic, offsets, _ = lic.split()
start, end = offsets.split(',')
text = urls[int(start):int(end)]
expected_lic = expected_lic.strip()
fn = python_safe_name(expected_lic)
yield Test(
location=os.path.join(location, f'url_{fn}_{i}.txt'),
filename=ccurl,
text=text,
license_key=expected_lic,
notes='This is a URL test extracted from ccurls.t1.'
)
# a bunch of non-SPDX license keys
extra_license_keys = {
'aladdin-9': 'afpl-9.0',
'anti996': '996-icu-1.0',
'bsd-1-clause-clear': 'unknown',
'bsd-3-clause-notrademark': 'unknown',
'commonsclause': 'unknown',
'cc-by-nc-sa-3.0-us': 'unknown',
'lgpl-2.0-or-3.0': 'unknown',
'googlepatentclause': 'unknown',
'googlepatentsfile': 'unknown',
'mit-noad': 'unknown',
'prosperity-3.0.0': 'unknown',
}
def generate_license_tests(location):
# map their keys to ours
license_mapping = {spdx: l.key for spdx, l in get_spdx_symbols().items()}
license_mapping.update(extra_license_keys)
for test in list(collect_tests(location)) + list(collect_url_tests(location)):
loc = test.location
print(f'Processing: {loc}')
with open(loc, 'w') as txt:
txt.write(test.text)
lickey = test.license_key
lickey = lickey and lickey.lower() or None
lickey = license_mapping.get(lickey)
lickey = lickey or 'unknown'
url = f'https://raw.githubusercontent.com/google/licensecheck/v0.3.1/testdata/{test.filename}'
with open(loc + '.yml', 'w') as td:
data = dict(
license_expressions=[lickey],
notes=(
f'License test derived from a file of the BSD-licensed repository at:\n' +
f'{url}\n' +
f'originally expected to be detected as {test.license_key}\n' +
f'with coverage of {test.coverage}\n' +
(test.notes or '')
)
)
td.write(saneyaml.dump(data))
if __name__ == '__main__':
import sys
generate_license_tests(sys.argv[1])
| 30.028571 | 102 | 0.588963 |
d8344232260d1f664ce2f2ad7d8cc8322e4632ba | 125 | py | Python | genesis/__init__.py | zznop/bn-genesis | f198143425184010c0e3894f004a8de34f3ccaf6 | [
"MIT"
] | 24 | 2019-03-10T15:33:36.000Z | 2022-03-14T04:04:38.000Z | genesis/__init__.py | zznop/bn-genesis | f198143425184010c0e3894f004a8de34f3ccaf6 | [
"MIT"
] | 3 | 2019-07-04T20:10:06.000Z | 2020-04-25T03:28:23.000Z | genesis/__init__.py | zznop/bn-genesis | f198143425184010c0e3894f004a8de34f3ccaf6 | [
"MIT"
] | null | null | null | from .loader import *
from .checksum import *
from .assemble import *
from .call_table_enum import *
GenesisView.register()
| 17.857143 | 30 | 0.768 |
659eaba0ae8558df49599ec1ac62501a3ec6cf8e | 2,210 | py | Python | pettingzoo/gamma/prospector/manual_control.py | AnanthHari/PettingZoo | c147c2992a067fd529570db0bea6a0324f01ee6e | [
"MIT"
] | null | null | null | pettingzoo/gamma/prospector/manual_control.py | AnanthHari/PettingZoo | c147c2992a067fd529570db0bea6a0324f01ee6e | [
"MIT"
] | null | null | null | pettingzoo/gamma/prospector/manual_control.py | AnanthHari/PettingZoo | c147c2992a067fd529570db0bea6a0324f01ee6e | [
"MIT"
] | null | null | null | import pygame
import numpy as np
from . import constants as const
# from prospector import constants as const
def manual_control(**kwargs):
from .prospector import env as _env
env = _env(**kwargs)
env.reset()
default_scalar = 1
agent = 0
done = False
quit_while = False
while not done:
agent_actions = np.array(
[[0, 0, 0] for _ in range(const.NUM_PROSPECTORS)]
+ [[0, 0, 0] for _ in range(const.NUM_BANKERS)]
)
for event in pygame.event.get():
# Use left/right arrow keys to switch between agents
# Use WASD to control bankers
# Use WASD and QE to control prospectors
# Note: QE while selecting a banker has no effect.
if event.type == pygame.KEYDOWN:
# Agent selection
if event.key == pygame.K_LEFT:
agent = (agent - 1) % const.NUM_AGENTS
elif event.key == pygame.K_RIGHT:
agent = (agent + 1) % const.NUM_AGENTS
# Forward/backward or up/down movement
elif event.key == pygame.K_w:
agent_actions[agent][0] = default_scalar
elif event.key == pygame.K_s:
agent_actions[agent][0] = -default_scalar
# left/right movement
elif event.key == pygame.K_a:
agent_actions[agent][1] = -default_scalar
elif event.key == pygame.K_d:
agent_actions[agent][1] = default_scalar
# rotation
elif event.key == pygame.K_q:
if 0 <= agent <= 3:
agent_actions[agent][2] = default_scalar
elif event.key == pygame.K_e:
if 0 <= agent <= 3:
agent_actions[agent][2] = -default_scalar
elif event.key == pygame.K_ESCAPE:
done = True
quit_while = True
if quit_while:
break
for a in agent_actions:
env.step(a, observe=False)
env.render()
done = any(env.dones.values())
env.close()
| 35.645161 | 65 | 0.515385 |
752c12c0410b8b7f953fe42e7bb892ac59dd18ba | 11,156 | py | Python | pdoc/extract.py | Bruntaz/pdoc | 60e4b6d9666f88dab7739cf31c639a45b6a8b11b | [
"Unlicense"
] | null | null | null | pdoc/extract.py | Bruntaz/pdoc | 60e4b6d9666f88dab7739cf31c639a45b6a8b11b | [
"Unlicense"
] | null | null | null | pdoc/extract.py | Bruntaz/pdoc | 60e4b6d9666f88dab7739cf31c639a45b6a8b11b | [
"Unlicense"
] | null | null | null | """
This module handles the interaction with Python's module system,
that is it loads the correct module based on whatever the user specified,
and provides the rest of pdoc with some additional module metadata.
"""
from __future__ import annotations
import importlib
import importlib.util
import io
import linecache
import os
import pkgutil
import platform
import subprocess
import sys
import traceback
import types
import warnings
from contextlib import contextmanager
from pathlib import Path
from typing import Callable, Iterable, Iterator, Optional, Sequence, Union
from unittest.mock import patch
from . import doc_ast, docstrings
def walk_specs(specs: Sequence[Union[Path, str]]) -> dict[str, None]:
"""
This function processes a list of module specifications and returns a collection of module names, including all
submodules, that should be processed by pdoc.
A module specification can either be the name of an installed module, or the path to a specific file or package.
For example, the following strings are valid module specifications:
- `typing`
- `collections.abc`
- `./test/testdata/demo_long.py`
- `./test/testdata/demopackage`
Practically you can think of this function as returning a list. Technically we return a dict with empty values,
which has efficient `__iter__` and `__contains__` implementations.
*This function has side-effects:* See `parse_spec`.
"""
all_modules: dict[str, None] = {}
for spec in specs:
modname = parse_spec(spec)
try:
with mock_some_common_side_effects():
modspec = importlib.util.find_spec(modname)
if modspec is None:
raise ModuleNotFoundError(modname)
except AnyException:
warnings.warn(
f"Cannot find spec for {modname} (from {spec}):\n{traceback.format_exc()}",
RuntimeWarning,
stacklevel=2,
)
else:
mod_info = pkgutil.ModuleInfo(
None, # type: ignore
name=modname,
ispkg=bool(modspec.submodule_search_locations),
)
for m in walk_packages2([mod_info]):
all_modules[m.name] = None
if not all_modules:
raise ValueError(f"Module not found: {', '.join(str(x) for x in specs)}.")
return all_modules
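# Illustrative usage sketch (not part of the original module); the specs are the
# examples listed in the docstring above:
#
#     for module_name in walk_specs(["./test/testdata/demopackage", "typing"]):
#         print(module_name)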
def parse_spec(spec: Union[Path, str]) -> str:
"""
This functions parses a user's module specification into a module identifier that can be imported.
If both a local file/directory and an importable module with the same name exist, a warning will be printed.
*This function has side-effects:* `sys.path` will be amended if the specification is a path.
If this side-effect is undesired, pass a module name instead.
"""
pspec = Path(spec)
if isinstance(spec, str) and (os.sep in spec or (os.altsep and os.altsep in spec)):
# We have a path separator, so it's definitely a filepath.
spec = pspec
if isinstance(spec, str) and (pspec.is_file() or (pspec / "__init__.py").is_file()):
# We have a local file with this name, but is there also a module with the same name?
try:
with mock_some_common_side_effects():
modspec = importlib.util.find_spec(spec)
if modspec is None:
raise ModuleNotFoundError
except AnyException:
# Module does not exist, use local file.
spec = pspec
else:
# Module does exist. We now check if the local file/directory is the same (e.g. after pip install -e),
# and emit a warning if that's not the case.
origin = (
Path(modspec.origin).absolute() if modspec.origin else Path("unknown")
)
local_dir = Path(spec).absolute()
if local_dir not in (origin, origin.parent):
print(
f"Warning: {spec!r} may refer to either the installed Python module or the local file/directory "
f"with the same name. pdoc will document the installed module, prepend './' to force "
f"documentation of the local file/directory.\n"
f" - Module location: {origin}\n"
f" - Local file/directory: {local_dir}",
file=sys.stderr,
)
if isinstance(spec, Path):
if (spec.parent / "__init__.py").exists():
return parse_spec(spec.parent) + f".{spec.stem}"
if str(spec.parent) not in sys.path:
sys.path.insert(0, str(spec.parent))
if spec.stem in sys.modules:
local_dir = spec.absolute()
origin = Path(sys.modules[spec.stem].__file__).absolute()
if local_dir not in (origin, origin.parent):
print(
f"Warning: pdoc cannot load {spec.stem!r} because a module with the same name is already "
f"imported in pdoc's Python process. pdoc will document the loaded module from {origin} instead.",
file=sys.stderr,
)
return spec.stem
else:
return spec
@contextmanager
def mock_some_common_side_effects():
"""
This context manager is applied when importing modules. It mocks some common side effects that may happen upon
module import. For example, `import antigravity` normally causes a webbrowser to open, which we want to suppress.
Note that this function must not be used for security purposes, it's easily bypassable.
"""
if platform.system() == "Windows": # pragma: no cover
noop_exe = "echo.exe"
else: # pragma: no cover
noop_exe = "echo"
def noop(*args, **kwargs):
pass
class PdocDefusedPopen(subprocess.Popen):
def __init__(self, *args, **kwargs): # pragma: no cover
kwargs["executable"] = noop_exe
super().__init__(*args, **kwargs)
with patch("subprocess.Popen", new=PdocDefusedPopen), patch(
"os.startfile", new=noop, create=True
), patch("sys.stdout", new=io.StringIO()), patch(
"sys.stderr", new=io.StringIO()
), patch(
"sys.stdin", new=io.StringIO()
):
yield
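# Illustrative usage sketch (not part of the original module): suppressing the
# browser launch that `import antigravity` would normally trigger.
#
#     with mock_some_common_side_effects():
#         importlib.import_module("antigravity")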
@mock_some_common_side_effects()
def load_module(module: str) -> types.ModuleType:
"""Try to import a module. If import fails, a RuntimeError is raised.
Returns the imported module."""
try:
return importlib.import_module(module)
except AnyException as e:
raise RuntimeError(f"Error importing {module}") from e
AnyException = (SystemExit, GeneratorExit, Exception)
"""BaseException, but excluding KeyboardInterrupt.
Modules may raise SystemExit on import (which we want to catch),
but we don't want to catch a user's KeyboardInterrupt.
"""
def _all_submodules(modulename: str) -> bool:
return True
def walk_packages2(
modules: Iterable[pkgutil.ModuleInfo],
module_filter: Callable[[str], bool] = _all_submodules,
) -> Iterator[pkgutil.ModuleInfo]:
"""
For a given list of modules, recursively yield their names and all their submodules' names.
This function is similar to `pkgutil.walk_packages`, but respects a package's `__all__` attribute if specified.
If `__all__` is defined, submodules not listed in `__all__` are excluded.
"""
# noinspection PyDefaultArgument
def seen(p, m={}): # pragma: no cover
if p in m:
return True
m[p] = True
for mod in modules:
# is __all__ defined and the module not in __all__?
if not module_filter(mod.name.rpartition(".")[2]):
continue
yield mod
if mod.ispkg:
try:
module = load_module(mod.name)
except RuntimeError:
warnings.warn(
f"Error loading {mod.name}:\n{traceback.format_exc()}",
RuntimeWarning,
)
continue
mod_all: list[str] = getattr(module, "__all__", None)
if mod_all is not None:
filt = mod_all.__contains__
else:
filt = _all_submodules
# don't traverse path items we've seen before
path = [p for p in (getattr(module, "__path__", None) or []) if not seen(p)]
yield from walk_packages2(pkgutil.iter_modules(path, f"{mod.name}."), filt)
def module_mtime(modulename: str) -> Optional[float]:
"""Returns the time the specified module file was last modified, or `None` if this cannot be determined.
The primary use of this is live-reloading modules on modification."""
try:
with mock_some_common_side_effects():
spec = importlib.util.find_spec(modulename)
except AnyException:
pass
else:
if spec is not None and spec.origin is not None:
return Path(spec.origin).stat().st_mtime
return None
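# Illustrative sketch (not part of the original module): a live-reload loop could
# compare successive module_mtime() values and call invalidate_caches() (defined
# below) when the file changes. The module name "demo" is hypothetical.
#
#     last_mtime = module_mtime("demo")
#     ...
#     if module_mtime("demo") != last_mtime:
#         invalidate_caches("demo")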
def invalidate_caches(module_name: str) -> None:
"""
Invalidate module cache to allow live-reloading of modules.
"""
# Getting this right is tricky – reloading modules causes a bunch of surprising side-effects.
# Our current best effort is to call `importlib.reload` on all modules that start with module_name.
# We also exclude our own dependencies, which cause fun errors otherwise.
if module_name not in sys.modules:
return
if any(
module_name.startswith(f"{x}.") or x == module_name
for x in ("jinja2", "markupsafe", "markdown2", "pygments")
):
return
# a more extreme alternative:
# filename = sys.modules[module_name].__file__
# if (
# filename.startswith(sysconfig.get_path("platstdlib"))
# or filename.startswith(sysconfig.get_path("stdlib"))
# ):
# return
importlib.invalidate_caches()
linecache.clearcache()
doc_ast._get_source.cache_clear()
docstrings.convert.cache_clear()
prefix = f"{module_name}."
mods = sorted(
mod for mod in sys.modules if module_name == mod or mod.startswith(prefix)
)
for modname in mods:
if modname == "pdoc.render":
# pdoc.render is stateful after configure(), so we don't want to reload it.
continue
try:
if not isinstance(sys.modules[modname], types.ModuleType):
continue # some funky stuff going on - one example is typing.io, which is a class.
with mock_some_common_side_effects():
importlib.reload(sys.modules[modname])
except AnyException:
warnings.warn(
f"Error reloading {modname}:\n{traceback.format_exc()}",
RuntimeWarning,
stacklevel=2,
)
def parse_specs(
modules: Sequence[Union[Path, str]]
) -> dict[str, None]: # pragma: no cover
"""A deprecated alias for `walk_specs`."""
warnings.warn(
"pdoc.extract.parse_specs has been renamed to pdoc.extract.walk_specs",
PendingDeprecationWarning,
)
return walk_specs(modules)
| 36.220779 | 118 | 0.629437 |
d7228a80b62953d924be7c6dedcd206a9d1d595d | 922 | py | Python | tests/components/cloud/__init__.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | tests/components/cloud/__init__.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | tests/components/cloud/__init__.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | """Tests for the cloud component."""
from unittest.mock import patch
from openpeerpower.components import cloud
from openpeerpower.components.cloud import const
from openpeerpower.setup import async_setup_component
from tests.common import mock_coro
async def mock_cloud(opp, config=None):
"""Mock cloud."""
assert await async_setup_component(opp, cloud.DOMAIN, {"cloud": config or {}})
cloud_inst = opp.data["cloud"]
with patch("opp_nabucasa.Cloud.run_executor", return_value=mock_coro()):
await cloud_inst.start()
def mock_cloud_prefs(opp, prefs={}):
"""Fixture for cloud component."""
prefs_to_set = {
const.PREF_ENABLE_ALEXA: True,
const.PREF_ENABLE_GOOGLE: True,
const.PREF_GOOGLE_SECURE_DEVICES_PIN: None,
}
prefs_to_set.update(prefs)
opp.data[cloud.DOMAIN].client._prefs._prefs = prefs_to_set
return opp.data[cloud.DOMAIN].client._prefs
| 31.793103 | 82 | 0.73102 |
710ea4975e99459f4a82717b46e5a5f5c9c51b95 | 625 | py | Python | setup.py | Gscorreia89/pyChemometrics | 16f3b4a1af873cf7240230439b503c5aee751ce7 | [
"BSD-3-Clause"
] | 22 | 2017-11-27T13:24:42.000Z | 2022-01-14T18:09:23.000Z | setup.py | Gscorreia89/pyChemometrics | 16f3b4a1af873cf7240230439b503c5aee751ce7 | [
"BSD-3-Clause"
] | 1 | 2018-04-23T11:12:28.000Z | 2018-04-23T11:12:28.000Z | setup.py | Gscorreia89/pyChemometrics | 16f3b4a1af873cf7240230439b503c5aee751ce7 | [
"BSD-3-Clause"
] | 13 | 2017-11-27T13:23:51.000Z | 2021-06-23T17:35:44.000Z | from setuptools import setup
setup(
name='pyChemometrics',
version='0.13.5',
packages=['pyChemometrics'],
url='https://github.com/Gscorreia89/pyChemometrics/',
documentation='http://pychemometrics.readthedocs.io/en/stable/',
license='BSD 3-Clause License',
author='Gonçalo Correia',
setup_requires=['wheel'],
author_email='[email protected]',
description='The pyChemometrics package provides objects which wrap pre-existing '
'scikit-learn PCA and PLS algorithms and add model assessment metrics and functions '
'common in the chemometrics literature.'
)
| 36.764706 | 102 | 0.6976 |
ba8d72c7c300dd65e3b54715fe6fa23ecc1037dc | 551 | py | Python | UsbDetector.py | devoctomy/piperformancerecorder | d1d1716a72f339ebfde502d47e11d70b6a0e1ee5 | [
"MIT"
] | null | null | null | UsbDetector.py | devoctomy/piperformancerecorder | d1d1716a72f339ebfde502d47e11d70b6a0e1ee5 | [
"MIT"
] | null | null | null | UsbDetector.py | devoctomy/piperformancerecorder | d1d1716a72f339ebfde502d47e11d70b6a0e1ee5 | [
"MIT"
] | null | null | null | import threading
import pyudev
class UsbDetector():
def __init__(self):
thread = threading.Thread(target=self._work)
thread.daemon = True
thread.start()
def _work(self):
self.context = pyudev.Context()
self.monitor = pyudev.Monitor.from_netlink(self.context)
self.monitor.filter_by(subsystem='usb')
self.monitor.start()
for device in iter(self.monitor.poll, None):
if device.action == 'add':
self.on_created()
else:
self.on_deleted()
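# Illustrative usage sketch (not part of the original file): the class expects
# on_created/on_deleted callbacks to be attached by the caller, for example:
#
#     detector = UsbDetector()
#     detector.on_created = lambda: print("USB device attached")
#     detector.on_deleted = lambda: print("USB device removed")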
| 29 | 64 | 0.586207 |
779a0eac0b0f647e9dc8a17eaea365b730f8d407 | 419 | py | Python | wolfram-query.py | alebml/athena | b5e7603ff830eef43469ffc32d39a1260e50bf0b | [
"MIT"
] | null | null | null | wolfram-query.py | alebml/athena | b5e7603ff830eef43469ffc32d39a1260e50bf0b | [
"MIT"
] | null | null | null | wolfram-query.py | alebml/athena | b5e7603ff830eef43469ffc32d39a1260e50bf0b | [
"MIT"
] | null | null | null | import wolframalpha
app_id = "L4YVH6-HPV69WKAWQ"
def wolframQuery(_input, _appid = "L4YVH6-HPV69WKAWQ"):
client = wolframalpha.Client(_appid)
res = client.query(str(_input))
try:
return str(next(res.results).text)
except Exception:
return "Input failed."
def testWolframQuery():
print(wolframQuery("What is 2 + 2?"))
def main():
testWolframQuery()
if __name__ == "__main__":
main() | 19.045455 | 55 | 0.706444 |
d66c311ef63fa07f2b31cbdd8a9dd92c00f92003 | 1,483 | py | Python | Tutorials/02_drive_square.py | Iceman1590/AT-TVectorAirgig | 7a3fb03ba9c2dd53108d6e8164d36938e56187e1 | [
"Apache-2.0"
] | null | null | null | Tutorials/02_drive_square.py | Iceman1590/AT-TVectorAirgig | 7a3fb03ba9c2dd53108d6e8164d36938e56187e1 | [
"Apache-2.0"
] | null | null | null | Tutorials/02_drive_square.py | Iceman1590/AT-TVectorAirgig | 7a3fb03ba9c2dd53108d6e8164d36938e56187e1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Make Vector drive in a square.
Make Vector drive in a square by going forward and turning left 4 times in a row.
"""
import anki_vector
from anki_vector.util import degrees, distance_mm, speed_mmps
def main():
args = anki_vector.util.parse_command_args()
# The robot drives forward and then turns left, four times in a row, to trace a square
with anki_vector.Robot(args.serial) as robot:
robot.behavior.drive_off_charger()
# Use a "for loop" to repeat the indented code 4 times
# Note: the _ variable name can be used when you don't need the value
for _ in range(4):
print("Drive Vector straight...")
robot.behavior.drive_straight(distance_mm(150), speed_mmps(50))
print("Turn Vector in place...")
robot.behavior.turn_in_place(degrees(90))
if __name__ == "__main__":
main()
| 32.955556 | 81 | 0.70735 |
cb78a06dcc72b83c7b96330814202e8f7786e009 | 62,630 | py | Python | zerver/lib/export.py | Rishabh570/zulip | 0600646fbfdcfb20c0c0d47950690a6efac873aa | [
"Apache-2.0"
] | null | null | null | zerver/lib/export.py | Rishabh570/zulip | 0600646fbfdcfb20c0c0d47950690a6efac873aa | [
"Apache-2.0"
] | 11 | 2021-02-08T20:59:55.000Z | 2022-03-12T00:51:41.000Z | zerver/lib/export.py | usmanmuhd/zulip | 0600646fbfdcfb20c0c0d47950690a6efac873aa | [
"Apache-2.0"
] | null | null | null | import datetime
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from django.conf import settings
from django.db import connection
from django.forms.models import model_to_dict
from django.utils.timezone import make_aware as timezone_make_aware
from django.utils.timezone import utc as timezone_utc
from django.utils.timezone import is_naive as timezone_is_naive
from django.db.models.query import QuerySet
import glob
import logging
import os
import ujson
import shutil
import subprocess
import tempfile
from zerver.lib.avatar_hash import user_avatar_hash, user_avatar_path_from_ids
from zerver.lib.create_user import random_api_key
from zerver.models import UserProfile, Realm, Client, Huddle, Stream, \
UserMessage, Subscription, Message, RealmEmoji, RealmFilter, \
RealmDomain, Recipient, DefaultStream, get_user_profile_by_id, \
UserPresence, UserActivity, UserActivityInterval, \
get_display_recipient, Attachment, get_system_bot
from zerver.lib.parallel import run_parallel
from zerver.lib.utils import mkdir_p
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
# Custom mypy types follow:
Record = Dict[str, Any]
TableName = str
TableData = Dict[TableName, List[Record]]
Field = str
Path = str
Context = Dict[str, Any]
FilterArgs = Dict[str, Any]
IdSource = Tuple[TableName, Field]
SourceFilter = Callable[[Record], bool]
# These next two types are callbacks, which mypy does not
# support well, because PEP 484 says "using callbacks
# with keyword arguments is not perceived as a common use case."
# CustomFetch = Callable[[TableData, Config, Context], None]
# PostProcessData = Callable[[TableData, Config, Context], None]
CustomFetch = Any # TODO: make more specific, see above
PostProcessData = Any # TODO: make more specific
# The keys of our MessageOutput variables are normally
# List[Record], but when we write partials, we can get
# lists of integers or a single integer.
# TODO: tighten this up with a union.
MessageOutput = Dict[str, Any]
realm_tables = [("zerver_defaultstream", DefaultStream),
("zerver_realmemoji", RealmEmoji),
("zerver_realmdomain", RealmDomain),
("zerver_realmfilter", RealmFilter)] # List[Tuple[TableName, Any]]
ALL_ZERVER_TABLES = [
# TODO: get a linter to ensure that this list is actually complete.
'zerver_attachment',
'zerver_attachment_messages',
'zerver_client',
'zerver_defaultstream',
'zerver_huddle',
'zerver_message',
'zerver_preregistrationuser',
'zerver_preregistrationuser_streams',
'zerver_pushdevicetoken',
'zerver_realm',
'zerver_realmdomain',
'zerver_realmemoji',
'zerver_realmfilter',
'zerver_recipient',
'zerver_scheduledemail',
'zerver_stream',
'zerver_subscription',
'zerver_useractivity',
'zerver_useractivityinterval',
'zerver_usermessage',
'zerver_userpresence',
'zerver_userprofile',
'zerver_userprofile_groups',
'zerver_userprofile_user_permissions',
]
NON_EXPORTED_TABLES = [
# These are known to either be altogether obsolete or
# simply inappropriate for exporting (e.g. contains transient
# data).
'zerver_preregistrationuser',
'zerver_preregistrationuser_streams',
'zerver_pushdevicetoken',
'zerver_scheduledemail',
'zerver_userprofile_groups',
'zerver_userprofile_user_permissions',
]
assert set(NON_EXPORTED_TABLES).issubset(set(ALL_ZERVER_TABLES))
IMPLICIT_TABLES = [
# ManyToMany relationships are exported implicitly.
'zerver_attachment_messages',
]
assert set(IMPLICIT_TABLES).issubset(set(ALL_ZERVER_TABLES))
ATTACHMENT_TABLES = [
'zerver_attachment',
]
assert set(ATTACHMENT_TABLES).issubset(set(ALL_ZERVER_TABLES))
MESSAGE_TABLES = [
# message tables get special treatment, because they're so big
'zerver_message',
'zerver_usermessage',
]
DATE_FIELDS = {
'zerver_attachment': ['create_time'],
'zerver_message': ['last_edit_time', 'pub_date'],
'zerver_realm': ['date_created'],
'zerver_stream': ['date_created'],
'zerver_useractivity': ['last_visit'],
'zerver_useractivityinterval': ['start', 'end'],
'zerver_userpresence': ['timestamp'],
'zerver_userprofile': ['date_joined', 'last_login', 'last_reminder'],
} # type: Dict[TableName, List[Field]]
def sanity_check_output(data):
# type: (TableData) -> None
tables = set(ALL_ZERVER_TABLES)
tables -= set(NON_EXPORTED_TABLES)
tables -= set(IMPLICIT_TABLES)
tables -= set(MESSAGE_TABLES)
tables -= set(ATTACHMENT_TABLES)
for table in tables:
if table not in data:
logging.warning('??? NO DATA EXPORTED FOR TABLE %s!!!' % (table,))
def write_data_to_file(output_file, data):
# type: (Path, Any) -> None
with open(output_file, "w") as f:
f.write(ujson.dumps(data, indent=4))
def make_raw(query, exclude=None):
# type: (Any, List[Field]) -> List[Record]
'''
Takes a Django query and returns a JSONable list
of dictionaries corresponding to the database rows.
'''
rows = []
for instance in query:
data = model_to_dict(instance, exclude=exclude)
"""
In Django 1.11.5, model_to_dict evaluates the QuerySet of
many-to-many field to give us a list of instances. We require
a list of primary keys, so we get the primary keys from the
instances below.
"""
for field in instance._meta.many_to_many:
value = data[field.name]
data[field.name] = [row.id for row in value]
rows.append(data)
return rows
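# Illustrative usage sketch (not part of the original module): dumping every
# Client row as JSONable dicts, the same way the Config machinery below does.
#
#     client_rows = make_raw(Client.objects.all())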
def floatify_datetime_fields(data, table):
# type: (TableData, TableName) -> None
for item in data[table]:
for field in DATE_FIELDS[table]:
orig_dt = item[field]
if orig_dt is None:
continue
if timezone_is_naive(orig_dt):
logging.warning("Naive datetime:", item)
dt = timezone_make_aware(orig_dt)
else:
dt = orig_dt
utc_naive = dt.replace(tzinfo=None) - dt.utcoffset()
item[field] = (utc_naive - datetime.datetime(1970, 1, 1)).total_seconds()
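# Illustrative note (not in the original module): after this runs, an aware
# datetime such as 2017-01-01 00:00:00 UTC is stored as the float 1483228800.0
# (seconds since the Unix epoch), which is what the JSON export expects.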
class Config(object):
'''
A Config object configures a single table for exporting (and,
maybe someday, importing as well).
You should never mutate Config objects as part of the export;
instead use the data to determine how you populate other
data structures.
There are parent/children relationships between Config objects.
The parent should be instantiated first. The child will
append itself to the parent's list of children.
'''
def __init__(self, table=None, model=None,
normal_parent=None, virtual_parent=None,
filter_args=None, custom_fetch=None, custom_tables=None,
post_process_data=None,
concat_and_destroy=None, id_source=None, source_filter=None,
parent_key=None, use_all=False, is_seeded=False, exclude=None):
# type: (str, Any, Config, Config, FilterArgs, CustomFetch, List[TableName], PostProcessData, List[TableName], IdSource, SourceFilter, Field, bool, bool, List[Field]) -> None
assert table or custom_tables
self.table = table
self.model = model
self.normal_parent = normal_parent
self.virtual_parent = virtual_parent
self.filter_args = filter_args
self.parent_key = parent_key
self.use_all = use_all
self.is_seeded = is_seeded
self.exclude = exclude
self.custom_fetch = custom_fetch
self.custom_tables = custom_tables
self.post_process_data = post_process_data
self.concat_and_destroy = concat_and_destroy
self.id_source = id_source
self.source_filter = source_filter
self.children = [] # type: List[Config]
if normal_parent is not None:
self.parent = normal_parent # type: Optional[Config]
else:
self.parent = None
if virtual_parent is not None and normal_parent is not None:
raise ValueError('''
If you specify a normal_parent, please
do not create a virtual_parent.
''')
if normal_parent is not None:
normal_parent.children.append(self)
elif virtual_parent is not None:
virtual_parent.children.append(self)
elif not is_seeded:
raise ValueError('''
You must specify a parent if you are
not using is_seeded.
''')
if self.id_source is not None:
if self.virtual_parent is None:
raise ValueError('''
You must specify a virtual_parent if you are
using id_source.''')
if self.id_source[0] != self.virtual_parent.table:
raise ValueError('''
Configuration error. To populate %s, you
want data from %s, but that differs from
the table name of your virtual parent (%s),
which suggests you may not have set up
the ordering correctly. You may simply
need to assign a virtual_parent, or there
may be deeper issues going on.''' % (
self.table,
self.id_source[0],
self.virtual_parent.table))
def export_from_config(response, config, seed_object=None, context=None):
# type: (TableData, Config, Any, Context) -> None
table = config.table
parent = config.parent
model = config.model
if context is None:
context = {}
if table:
exported_tables = [table]
else:
if config.custom_tables is None:
raise ValueError('''
You must specify config.custom_tables if you
are not specifying config.table''')
exported_tables = config.custom_tables
for t in exported_tables:
logging.info('Exporting via export_from_config: %s' % (t,))
rows = None
if config.is_seeded:
rows = [seed_object]
elif config.custom_fetch:
config.custom_fetch(
response=response,
config=config,
context=context
)
if config.custom_tables:
for t in config.custom_tables:
if t not in response:
raise Exception('Custom fetch failed to populate %s' % (t,))
elif config.concat_and_destroy:
# When we concat_and_destroy, we are working with
# temporary "tables" that are lists of records that
# should already be ready to export.
data = [] # type: List[Record]
for t in config.concat_and_destroy:
data += response[t]
del response[t]
logging.info('Deleted temporary %s' % (t,))
assert table is not None
response[table] = data
elif config.use_all:
assert model is not None
query = model.objects.all()
rows = list(query)
elif config.normal_parent:
# In this mode, our current model is figuratively Article,
# and normal_parent is figuratively Blog, and
# now we just need to get all the articles
# contained by the blogs.
model = config.model
assert parent is not None
assert parent.table is not None
assert config.parent_key is not None
parent_ids = [r['id'] for r in response[parent.table]]
filter_parms = {config.parent_key: parent_ids} # type: Dict[str, Any]
if config.filter_args is not None:
filter_parms.update(config.filter_args)
assert model is not None
query = model.objects.filter(**filter_parms)
rows = list(query)
elif config.id_source:
# In this mode, we are the figurative Blog, and we now
# need to look at the current response to get all the
# blog ids from the Article rows we fetched previously.
model = config.model
assert model is not None
# This will be a tuple of the form ('zerver_article', 'blog').
(child_table, field) = config.id_source
child_rows = response[child_table]
if config.source_filter:
child_rows = [r for r in child_rows if config.source_filter(r)]
lookup_ids = [r[field] for r in child_rows]
filter_parms = dict(id__in=lookup_ids)
if config.filter_args:
filter_parms.update(config.filter_args)
query = model.objects.filter(**filter_parms)
rows = list(query)
# Post-process rows (which won't apply to custom fetches/concats)
if rows is not None:
assert table is not None # Hint for mypy
response[table] = make_raw(rows, exclude=config.exclude)
if table in DATE_FIELDS:
floatify_datetime_fields(response, table)
if config.post_process_data:
config.post_process_data(
response=response,
config=config,
context=context
)
# Now walk our children. It's extremely important to respect
# the order of children here.
for child_config in config.children:
export_from_config(
response=response,
config=child_config,
context=context,
)
def get_realm_config():
# type: () -> Config
# This is common, public information about the realm that we can share
# with all realm users.
realm_config = Config(
table='zerver_realm',
is_seeded=True
)
Config(
table='zerver_defaultstream',
model=DefaultStream,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_realmemoji',
model=RealmEmoji,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_realmdomain',
model=RealmDomain,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_realmfilter',
model=RealmFilter,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_client',
model=Client,
virtual_parent=realm_config,
use_all=True
)
user_profile_config = Config(
custom_tables=[
'zerver_userprofile',
'zerver_userprofile_mirrordummy',
],
# set table for children who treat us as normal parent
table='zerver_userprofile',
virtual_parent=realm_config,
custom_fetch=fetch_user_profile,
)
Config(
custom_tables=[
'zerver_userprofile_crossrealm',
],
virtual_parent=user_profile_config,
custom_fetch=fetch_user_profile_cross_realm,
)
Config(
table='zerver_userpresence',
model=UserPresence,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_useractivity',
model=UserActivity,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_useractivityinterval',
model=UserActivityInterval,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
# Some of these tables are intermediate "tables" that we
# create only for the export. Think of them as similar to views.
user_subscription_config = Config(
table='_user_subscription',
model=Subscription,
normal_parent=user_profile_config,
filter_args={'recipient__type': Recipient.PERSONAL},
parent_key='user_profile__in',
)
Config(
table='_user_recipient',
model=Recipient,
virtual_parent=user_subscription_config,
id_source=('_user_subscription', 'recipient'),
)
#
stream_subscription_config = Config(
table='_stream_subscription',
model=Subscription,
normal_parent=user_profile_config,
filter_args={'recipient__type': Recipient.STREAM},
parent_key='user_profile__in',
)
stream_recipient_config = Config(
table='_stream_recipient',
model=Recipient,
virtual_parent=stream_subscription_config,
id_source=('_stream_subscription', 'recipient'),
)
Config(
table='zerver_stream',
model=Stream,
virtual_parent=stream_recipient_config,
id_source=('_stream_recipient', 'type_id'),
source_filter=lambda r: r['type'] == Recipient.STREAM,
exclude=['email_token'],
post_process_data=sanity_check_stream_data
)
#
Config(
custom_tables=[
'_huddle_recipient',
'_huddle_subscription',
'zerver_huddle',
],
normal_parent=user_profile_config,
custom_fetch=fetch_huddle_objects,
)
# Now build permanent tables from our temp tables.
Config(
table='zerver_recipient',
virtual_parent=user_profile_config,
concat_and_destroy=[
'_user_recipient',
'_stream_recipient',
'_huddle_recipient',
],
)
Config(
table='zerver_subscription',
virtual_parent=user_profile_config,
concat_and_destroy=[
'_user_subscription',
'_stream_subscription',
'_huddle_subscription',
]
)
return realm_config
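# Illustrative usage sketch (not part of the original module): the config tree
# built above is driven roughly like this by the realm export code; the exact
# call site lives in this module's callers.
#
#     response = {}  # type: TableData
#     export_from_config(
#         response=response,
#         config=get_realm_config(),
#         seed_object=realm,
#         context=dict(realm=realm, exportable_user_ids=None),
#     )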
def sanity_check_stream_data(response, config, context):
# type: (TableData, Config, Context) -> None
if context['exportable_user_ids'] is not None:
# If we restrict which user ids are exportable,
# the way that we find # streams is a little too
# complex to have a sanity check.
return
actual_streams = set([stream.name for stream in Stream.objects.filter(realm=response["zerver_realm"][0]['id'])])
streams_in_response = set([stream['name'] for stream in response['zerver_stream']])
if streams_in_response != actual_streams:
print(streams_in_response - actual_streams)
print(actual_streams - streams_in_response)
raise Exception('''
zerver_stream data does not match
Stream.objects.all().
Please investigate!
''')
def fetch_user_profile(response, config, context):
# type: (TableData, Config, Context) -> None
realm = context['realm']
exportable_user_ids = context['exportable_user_ids']
query = UserProfile.objects.filter(realm_id=realm.id)
exclude = ['password', 'api_key']
rows = make_raw(list(query), exclude=exclude)
normal_rows = [] # type: List[Record]
dummy_rows = [] # type: List[Record]
for row in rows:
if exportable_user_ids is not None:
if row['id'] in exportable_user_ids:
assert not row['is_mirror_dummy']
else:
# Convert non-exportable users to
# inactive is_mirror_dummy users.
row['is_mirror_dummy'] = True
row['is_active'] = False
if row['is_mirror_dummy']:
dummy_rows.append(row)
else:
normal_rows.append(row)
response['zerver_userprofile'] = normal_rows
response['zerver_userprofile_mirrordummy'] = dummy_rows
def fetch_user_profile_cross_realm(response, config, context):
# type: (TableData, Config, Context) -> None
realm = context['realm']
if realm.string_id == "zulip":
response['zerver_userprofile_crossrealm'] = []
else:
response['zerver_userprofile_crossrealm'] = [dict(email=x.email, id=x.id) for x in [
get_system_bot(settings.NOTIFICATION_BOT),
get_system_bot(settings.EMAIL_GATEWAY_BOT),
get_system_bot(settings.WELCOME_BOT),
]]
def fetch_attachment_data(response, realm_id, message_ids):
# type: (TableData, int, Set[int]) -> None
filter_args = {'realm_id': realm_id}
query = Attachment.objects.filter(**filter_args)
response['zerver_attachment'] = make_raw(list(query))
floatify_datetime_fields(response, 'zerver_attachment')
'''
We usually export most messages for the realm, but not
quite ALL messages for the realm. So, we need to
clean up our attachment data to have correct
values for response['zerver_attachment'][<n>]['messages'].
'''
for row in response['zerver_attachment']:
filtered_message_ids = set(row['messages']).intersection(message_ids)
row['messages'] = sorted(list(filtered_message_ids))
'''
Attachments can be connected to multiple messages, although
it's most common to have just one message. Regardless,
if none of those message(s) survived the filtering above
for a particular attachment, then we won't export the
attachment row.
'''
response['zerver_attachment'] = [
row for row in response['zerver_attachment']
if row['messages']]
def fetch_huddle_objects(response, config, context):
# type: (TableData, Config, Context) -> None
realm = context['realm']
assert config.parent is not None
assert config.parent.table is not None
user_profile_ids = set(r['id'] for r in response[config.parent.table])
# First we get all huddles involving someone in the realm.
realm_huddle_subs = Subscription.objects.select_related("recipient").filter(recipient__type=Recipient.HUDDLE,
user_profile__in=user_profile_ids)
realm_huddle_recipient_ids = set(sub.recipient_id for sub in realm_huddle_subs)
# Mark all Huddles whose recipient ID contains a cross-realm user.
unsafe_huddle_recipient_ids = set()
for sub in Subscription.objects.select_related().filter(recipient__in=realm_huddle_recipient_ids):
if sub.user_profile.realm != realm:
# In almost every case the other realm will be zulip.com
unsafe_huddle_recipient_ids.add(sub.recipient_id)
# Now filter down to just those huddles that are entirely within the realm.
#
# This is important for ensuring that the User objects needed
# to import it on the other end exist (since we're only
# exporting the users from this realm), at the cost of losing
# some of these cross-realm messages.
huddle_subs = [sub for sub in realm_huddle_subs if sub.recipient_id not in unsafe_huddle_recipient_ids]
huddle_recipient_ids = set(sub.recipient_id for sub in huddle_subs)
huddle_ids = set(sub.recipient.type_id for sub in huddle_subs)
huddle_subscription_dicts = make_raw(huddle_subs)
huddle_recipients = make_raw(Recipient.objects.filter(id__in=huddle_recipient_ids))
response['_huddle_recipient'] = huddle_recipients
response['_huddle_subscription'] = huddle_subscription_dicts
response['zerver_huddle'] = make_raw(Huddle.objects.filter(id__in=huddle_ids))
def fetch_usermessages(realm, message_ids, user_profile_ids, message_filename):
# type: (Realm, Set[int], Set[int], Path) -> List[Record]
# UserMessage export security rule: You can export UserMessages
# for the messages you exported for the users in your realm.
user_message_query = UserMessage.objects.filter(user_profile__realm=realm,
message_id__in=message_ids)
user_message_chunk = []
for user_message in user_message_query:
if user_message.user_profile_id not in user_profile_ids:
continue
user_message_obj = model_to_dict(user_message)
user_message_obj['flags_mask'] = user_message.flags.mask
del user_message_obj['flags']
user_message_chunk.append(user_message_obj)
logging.info("Fetched UserMessages for %s" % (message_filename,))
return user_message_chunk
def export_usermessages_batch(input_path, output_path):
# type: (Path, Path) -> None
"""As part of the system for doing parallel exports, this runs on one
batch of Message objects and adds the corresponding UserMessage
objects. (This is called by the export_usermessage_batch
management command)."""
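    # Typical invocation, driven by launch_user_message_subprocesses() below:
    #   ./manage.py export_usermessage_batch --path <output_dir> --thread <shard>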
with open(input_path, "r") as input_file:
output = ujson.loads(input_file.read())
message_ids = [item['id'] for item in output['zerver_message']]
user_profile_ids = set(output['zerver_userprofile_ids'])
del output['zerver_userprofile_ids']
realm = Realm.objects.get(id=output['realm_id'])
del output['realm_id']
output['zerver_usermessage'] = fetch_usermessages(realm, set(message_ids), user_profile_ids, output_path)
write_message_export(output_path, output)
os.unlink(input_path)
def write_message_export(message_filename, output):
# type: (Path, MessageOutput) -> None
write_data_to_file(output_file=message_filename, data=output)
logging.info("Dumped to %s" % (message_filename,))
def export_partial_message_files(realm, response, chunk_size=1000, output_dir=None):
# type: (Realm, TableData, int, Path) -> Set[int]
if output_dir is None:
output_dir = tempfile.mkdtemp(prefix="zulip-export")
def get_ids(records):
# type: (List[Record]) -> Set[int]
return set(x['id'] for x in records)
# Basic security rule: You can export everything either...
# - sent by someone in your exportable_user_ids
# OR
# - received by someone in your exportable_user_ids (which
# equates to a recipient object we are exporting)
#
# TODO: In theory, you should be able to export messages in
# cross-realm PM threads; currently, this only exports cross-realm
# messages received by your realm that were sent by Zulip system
# bots (e.g. emailgateway, notification-bot).
    # Here, "we" and "us" refer to the inner circle of users who
# were specified as being allowed to be exported. "Them"
# refers to other users.
user_ids_for_us = get_ids(
response['zerver_userprofile']
)
recipient_ids_for_us = get_ids(response['zerver_recipient'])
ids_of_our_possible_senders = get_ids(
response['zerver_userprofile'] +
response['zerver_userprofile_mirrordummy'] +
response['zerver_userprofile_crossrealm'])
ids_of_non_exported_possible_recipients = ids_of_our_possible_senders - user_ids_for_us
recipients_for_them = Recipient.objects.filter(
type=Recipient.PERSONAL,
type_id__in=ids_of_non_exported_possible_recipients).values("id")
recipient_ids_for_them = get_ids(recipients_for_them)
# We capture most messages here, since the
# recipients we subscribe to are also the
# recipients of most messages we send.
messages_we_received = Message.objects.filter(
sender__in=ids_of_our_possible_senders,
recipient__in=recipient_ids_for_us,
).order_by('id')
# This should pick up stragglers; messages we sent
    # where the recipient wasn't subscribed to by any of
# us (such as PMs to "them").
messages_we_sent_to_them = Message.objects.filter(
sender__in=user_ids_for_us,
recipient__in=recipient_ids_for_them,
).order_by('id')
message_queries = [
messages_we_received,
messages_we_sent_to_them
]
all_message_ids = set() # type: Set[int]
dump_file_id = 1
for message_query in message_queries:
dump_file_id = write_message_partial_for_query(
realm=realm,
message_query=message_query,
dump_file_id=dump_file_id,
all_message_ids=all_message_ids,
output_dir=output_dir,
chunk_size=chunk_size,
user_profile_ids=user_ids_for_us,
)
return all_message_ids
def write_message_partial_for_query(realm, message_query, dump_file_id,
all_message_ids, output_dir,
chunk_size, user_profile_ids):
# type: (Realm, Any, int, Set[int], Path, int, Set[int]) -> int
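    # Walk the query in ascending id order, chunk_size rows at a time,
    # writing each chunk out as its own numbered .partial shard file.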
min_id = -1
while True:
actual_query = message_query.filter(id__gt=min_id)[0:chunk_size]
message_chunk = make_raw(actual_query)
message_ids = set(m['id'] for m in message_chunk)
assert len(message_ids.intersection(all_message_ids)) == 0
all_message_ids.update(message_ids)
if len(message_chunk) == 0:
break
# Figure out the name of our shard file.
message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
message_filename += '.partial'
logging.info("Fetched Messages for %s" % (message_filename,))
# Clean up our messages.
table_data = {} # type: TableData
table_data['zerver_message'] = message_chunk
floatify_datetime_fields(table_data, 'zerver_message')
# Build up our output for the .partial file, which needs
# a list of user_profile_ids to search for (as well as
# the realm id).
output = {} # type: MessageOutput
output['zerver_message'] = table_data['zerver_message']
output['zerver_userprofile_ids'] = list(user_profile_ids)
output['realm_id'] = realm.id
# And write the data.
write_message_export(message_filename, output)
min_id = max(message_ids)
dump_file_id += 1
return dump_file_id
def export_uploads_and_avatars(realm, output_dir):
# type: (Realm, Path) -> None
uploads_output_dir = os.path.join(output_dir, 'uploads')
avatars_output_dir = os.path.join(output_dir, 'avatars')
for output_dir in (uploads_output_dir, avatars_output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if settings.LOCAL_UPLOADS_DIR:
# Small installations and developers will usually just store files locally.
export_uploads_from_local(realm,
local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "files"),
output_dir=uploads_output_dir)
export_avatars_from_local(realm,
local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars"),
output_dir=avatars_output_dir)
else:
# Some bigger installations will have their data stored on S3.
export_files_from_s3(realm,
settings.S3_AVATAR_BUCKET,
output_dir=avatars_output_dir,
processing_avatars=True)
export_files_from_s3(realm,
settings.S3_AUTH_UPLOADS_BUCKET,
output_dir=uploads_output_dir)
def export_files_from_s3(realm, bucket_name, output_dir, processing_avatars=False):
# type: (Realm, str, Path, bool) -> None
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = conn.get_bucket(bucket_name, validate=True)
records = []
logging.info("Downloading uploaded files from %s" % (bucket_name))
avatar_hash_values = set()
user_ids = set()
if processing_avatars:
bucket_list = bucket.list()
for user_profile in UserProfile.objects.filter(realm=realm):
avatar_path = user_avatar_path_from_ids(user_profile.id, realm.id)
avatar_hash_values.add(avatar_path)
avatar_hash_values.add(avatar_path + ".original")
user_ids.add(user_profile.id)
else:
bucket_list = bucket.list(prefix="%s/" % (realm.id,))
if settings.EMAIL_GATEWAY_BOT is not None:
email_gateway_bot = get_system_bot(settings.EMAIL_GATEWAY_BOT)
else:
email_gateway_bot = None
count = 0
for bkey in bucket_list:
if processing_avatars and bkey.name not in avatar_hash_values:
continue
key = bucket.get_key(bkey.name)
# This can happen if an email address has moved realms
if 'realm_id' in key.metadata and key.metadata['realm_id'] != str(realm.id):
if email_gateway_bot is None or key.metadata['user_profile_id'] != str(email_gateway_bot.id):
raise Exception("Key metadata problem: %s %s / %s" % (key.name, key.metadata, realm.id))
# Email gateway bot sends messages, potentially including attachments, cross-realm.
print("File uploaded by email gateway bot: %s / %s" % (key.name, key.metadata))
elif processing_avatars:
if 'user_profile_id' not in key.metadata:
raise Exception("Missing user_profile_id in key metadata: %s" % (key.metadata,))
if int(key.metadata['user_profile_id']) not in user_ids:
raise Exception("Wrong user_profile_id in key metadata: %s" % (key.metadata,))
elif 'realm_id' not in key.metadata:
raise Exception("Missing realm_id in key metadata: %s" % (key.metadata,))
record = dict(s3_path=key.name, bucket=bucket_name,
size=key.size, last_modified=key.last_modified,
content_type=key.content_type, md5=key.md5)
record.update(key.metadata)
# A few early avatars don't have 'realm_id' on the object; fix their metadata
user_profile = get_user_profile_by_id(record['user_profile_id'])
if 'realm_id' not in record:
record['realm_id'] = user_profile.realm_id
record['user_profile_email'] = user_profile.email
if processing_avatars:
dirname = output_dir
filename = os.path.join(dirname, key.name)
record['path'] = key.name
else:
fields = key.name.split('/')
if len(fields) != 3:
raise Exception("Suspicious key %s" % (key.name))
dirname = os.path.join(output_dir, fields[1])
filename = os.path.join(dirname, fields[2])
record['path'] = os.path.join(fields[1], fields[2])
if not os.path.exists(dirname):
os.makedirs(dirname)
key.get_contents_to_filename(filename)
records.append(record)
count += 1
if (count % 100 == 0):
logging.info("Finished %s" % (count,))
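    # records.json is the manifest that import_uploads_local/import_uploads_s3
    # read on the importing server to restore these files.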
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def export_uploads_from_local(realm, local_dir, output_dir):
# type: (Realm, Path, Path) -> None
count = 0
records = []
for attachment in Attachment.objects.filter(realm_id=realm.id):
local_path = os.path.join(local_dir, attachment.path_id)
output_path = os.path.join(output_dir, attachment.path_id)
mkdir_p(os.path.dirname(output_path))
subprocess.check_call(["cp", "-a", local_path, output_path])
stat = os.stat(local_path)
record = dict(realm_id=attachment.realm_id,
user_profile_id=attachment.owner.id,
user_profile_email=attachment.owner.email,
s3_path=attachment.path_id,
path=attachment.path_id,
size=stat.st_size,
last_modified=stat.st_mtime,
content_type=None)
records.append(record)
count += 1
if (count % 100 == 0):
logging.info("Finished %s" % (count,))
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def export_avatars_from_local(realm, local_dir, output_dir):
# type: (Realm, Path, Path) -> None
count = 0
records = []
users = list(UserProfile.objects.filter(realm=realm))
users += [
get_system_bot(settings.NOTIFICATION_BOT),
get_system_bot(settings.EMAIL_GATEWAY_BOT),
get_system_bot(settings.WELCOME_BOT),
]
for user in users:
if user.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
continue
avatar_path = user_avatar_path_from_ids(user.id, realm.id)
wildcard = os.path.join(local_dir, avatar_path + '.*')
for local_path in glob.glob(wildcard):
logging.info('Copying avatar file for user %s from %s' % (
user.email, local_path))
fn = os.path.relpath(local_path, local_dir)
output_path = os.path.join(output_dir, fn)
mkdir_p(str(os.path.dirname(output_path)))
subprocess.check_call(["cp", "-a", str(local_path), str(output_path)])
stat = os.stat(local_path)
record = dict(realm_id=realm.id,
user_profile_id=user.id,
user_profile_email=user.email,
s3_path=fn,
path=fn,
size=stat.st_size,
last_modified=stat.st_mtime,
content_type=None)
records.append(record)
count += 1
if (count % 100 == 0):
logging.info("Finished %s" % (count,))
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def do_write_stats_file_for_realm_export(output_dir):
# type: (Path) -> None
stats_file = os.path.join(output_dir, 'stats.txt')
realm_file = os.path.join(output_dir, 'realm.json')
attachment_file = os.path.join(output_dir, 'attachment.json')
message_files = glob.glob(os.path.join(output_dir, 'messages-*.json'))
fns = sorted([attachment_file] + message_files + [realm_file])
logging.info('Writing stats file: %s\n' % (stats_file,))
with open(stats_file, 'w') as f:
for fn in fns:
f.write(os.path.basename(fn) + '\n')
payload = open(fn).read()
data = ujson.loads(payload)
for k in sorted(data):
f.write('%5d %s\n' % (len(data[k]), k))
f.write('\n')
avatar_file = os.path.join(output_dir, 'avatars/records.json')
uploads_file = os.path.join(output_dir, 'uploads/records.json')
for fn in [avatar_file, uploads_file]:
f.write(fn+'\n')
payload = open(fn).read()
data = ujson.loads(payload)
f.write('%5d records\n' % len(data))
f.write('\n')
def do_export_realm(realm, output_dir, threads, exportable_user_ids=None):
# type: (Realm, Path, int, Set[int]) -> None
response = {} # type: TableData
# We need at least one thread running to export
# UserMessage rows. The management command should
# enforce this for us.
if not settings.TEST_SUITE:
assert threads >= 1
assert os.path.exists("./manage.py")
realm_config = get_realm_config()
create_soft_link(source=output_dir, in_progress=True)
logging.info("Exporting data from get_realm_config()...")
export_from_config(
response=response,
config=realm_config,
seed_object=realm,
context=dict(realm=realm, exportable_user_ids=exportable_user_ids)
)
logging.info('...DONE with get_realm_config() data')
export_file = os.path.join(output_dir, "realm.json")
write_data_to_file(output_file=export_file, data=response)
sanity_check_output(response)
logging.info("Exporting uploaded files and avatars")
export_uploads_and_avatars(realm, output_dir)
# We (sort of) export zerver_message rows here. We write
# them to .partial files that are subsequently fleshed out
# by parallel processes to add in zerver_usermessage data.
# This is for performance reasons, of course. Some installations
# have millions of messages.
    logging.info("Exporting message .partial files")
message_ids = export_partial_message_files(realm, response, output_dir=output_dir)
logging.info('%d messages were exported' % (len(message_ids)))
# zerver_attachment
export_attachment_table(realm=realm, output_dir=output_dir, message_ids=message_ids)
# Start parallel jobs to export the UserMessage objects.
launch_user_message_subprocesses(threads=threads, output_dir=output_dir)
logging.info("Finished exporting %s" % (realm.string_id))
create_soft_link(source=output_dir, in_progress=False)
def export_attachment_table(realm, output_dir, message_ids):
# type: (Realm, Path, Set[int]) -> None
response = {} # type: TableData
fetch_attachment_data(response=response, realm_id=realm.id, message_ids=message_ids)
output_file = os.path.join(output_dir, "attachment.json")
logging.info('Writing attachment table data to %s' % (output_file,))
write_data_to_file(output_file=output_file, data=response)
def create_soft_link(source, in_progress=True):
# type: (Path, bool) -> None
is_done = not in_progress
in_progress_link = '/tmp/zulip-export-in-progress'
done_link = '/tmp/zulip-export-most-recent'
if in_progress:
new_target = in_progress_link
else:
subprocess.check_call(['rm', '-f', in_progress_link])
new_target = done_link
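    # ln -nsf: force-create the symlink, replacing any existing link
    # rather than following it into a directory.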
subprocess.check_call(["ln", "-nsf", source, new_target])
if is_done:
logging.info('See %s for output files' % (new_target,))
def launch_user_message_subprocesses(threads, output_dir):
# type: (int, Path) -> None
logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows' % (threads,))
def run_job(shard):
# type: (str) -> int
subprocess.call(["./manage.py", 'export_usermessage_batch', '--path',
str(output_dir), '--thread', shard])
return 0
for (status, job) in run_parallel(run_job,
[str(x) for x in range(0, threads)],
threads=threads):
print("Shard %s finished, status %s" % (job, status))
def do_export_user(user_profile, output_dir):
# type: (UserProfile, Path) -> None
response = {} # type: TableData
export_single_user(user_profile, response)
export_file = os.path.join(output_dir, "user.json")
write_data_to_file(output_file=export_file, data=response)
logging.info("Exporting messages")
export_messages_single_user(user_profile, output_dir)
def export_single_user(user_profile, response):
# type: (UserProfile, TableData) -> None
config = get_single_user_config()
export_from_config(
response=response,
config=config,
seed_object=user_profile,
)
def get_single_user_config():
# type: () -> Config
# zerver_userprofile
user_profile_config = Config(
table='zerver_userprofile',
is_seeded=True,
exclude=['password', 'api_key'],
)
# zerver_subscription
subscription_config = Config(
table='zerver_subscription',
model=Subscription,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
# zerver_recipient
recipient_config = Config(
table='zerver_recipient',
model=Recipient,
virtual_parent=subscription_config,
id_source=('zerver_subscription', 'recipient'),
)
# zerver_stream
Config(
table='zerver_stream',
model=Stream,
virtual_parent=recipient_config,
id_source=('zerver_recipient', 'type_id'),
source_filter=lambda r: r['type'] == Recipient.STREAM,
exclude=['email_token'],
)
return user_profile_config
def export_messages_single_user(user_profile, output_dir, chunk_size=1000):
# type: (UserProfile, Path, int) -> None
user_message_query = UserMessage.objects.filter(user_profile=user_profile).order_by("id")
min_id = -1
dump_file_id = 1
while True:
actual_query = user_message_query.select_related("message", "message__sending_client").filter(id__gt=min_id)[0:chunk_size]
user_message_chunk = [um for um in actual_query]
user_message_ids = set(um.id for um in user_message_chunk)
if len(user_message_chunk) == 0:
break
message_chunk = []
for user_message in user_message_chunk:
item = model_to_dict(user_message.message)
item['flags'] = user_message.flags_list()
item['flags_mask'] = user_message.flags.mask
# Add a few nice, human-readable details
item['sending_client_name'] = user_message.message.sending_client.name
item['display_recipient'] = get_display_recipient(user_message.message.recipient)
message_chunk.append(item)
message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
logging.info("Fetched Messages for %s" % (message_filename,))
output = {'zerver_message': message_chunk}
floatify_datetime_fields(output, 'zerver_message')
write_message_export(message_filename, output)
min_id = max(user_message_ids)
dump_file_id += 1
# Code from here is the realm import code path
# id_maps is a dictionary that maps table names to dictionaries
# that map old ids to new ids. We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize id_maps with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
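# For example, id_maps['client'][17] == 3 means that the exported Client
# row with id 17 corresponds to the Client with id 3 on this server.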
id_maps = {
'client': {},
'user_profile': {},
} # type: Dict[str, Dict[int, int]]
def update_id_map(table, old_id, new_id):
# type: (TableName, int, int) -> None
if table not in id_maps:
raise Exception('''
Table %s is not initialized in id_maps, which could
mean that we have not thought through circular
dependencies.
''' % (table,))
id_maps[table][old_id] = new_id
def fix_datetime_fields(data, table):
# type: (TableData, TableName) -> None
for item in data[table]:
for field_name in DATE_FIELDS[table]:
if item[field_name] is not None:
item[field_name] = datetime.datetime.fromtimestamp(item[field_name], tz=timezone_utc)
def convert_to_id_fields(data, table, field_name):
# type: (TableData, TableName, Field) -> None
'''
When Django gives us dict objects via model_to_dict, the foreign
key fields are `foo`, but we want `foo_id` for the bulk insert.
This function handles the simple case where we simply rename
the fields. For cases where we need to munge ids in the
database, see re_map_foreign_keys.
'''
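    # For example, an exported row {'realm': 17} becomes {'realm_id': 17}.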
for item in data[table]:
item[field_name + "_id"] = item[field_name]
del item[field_name]
def re_map_foreign_keys(data, table, field_name, related_table, verbose=False):
# type: (TableData, TableName, Field, TableName, bool) -> None
'''
We occasionally need to assign new ids to rows during the
import/export process, to accommodate things like existing rows
already being in tables. See bulk_import_client for more context.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this fixer function does
the re-mapping. (It also appends `_id` to the field.)
'''
lookup_table = id_maps[related_table]
for item in data[table]:
old_id = item[field_name]
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s%s from %s to %s' % (table,
field_name + '_id',
old_id,
new_id))
else:
new_id = old_id
item[field_name + "_id"] = new_id
del item[field_name]
def fix_bitfield_keys(data, table, field_name):
# type: (TableData, TableName, Field) -> None
for item in data[table]:
item[field_name] = item[field_name + '_mask']
del item[field_name + '_mask']
def fix_realm_authentication_bitfield(data, table, field_name):
    # type: (TableData, TableName, Field) -> None
    """Convert the exported authentication_methods list of (method, enabled)
    pairs back into the integer bitfield value stored on the model,
    e.g. [('Email', True), ('Google', False)] -> '10' -> 2."""
for item in data[table]:
values_as_bitstring = ''.join(['1' if field[1] else '0' for field in
item[field_name]])
values_as_int = int(values_as_bitstring, 2)
item[field_name] = values_as_int
def bulk_import_model(data, model, table, dump_file_id=None):
# type: (TableData, Any, TableName, str) -> None
    # TODO: deprecate dump_file_id
model.objects.bulk_create(model(**item) for item in data[table])
if dump_file_id is None:
logging.info("Successfully imported %s from %s." % (model, table))
else:
logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
# check whether a Client object already exists, and remap all
# Client IDs to the values in the new DB.
def bulk_import_client(data, model, table):
# type: (TableData, Any, TableName) -> None
for item in data[table]:
try:
client = Client.objects.get(name=item['name'])
except Client.DoesNotExist:
client = Client.objects.create(name=item['name'])
update_id_map(table='client', old_id=item['id'], new_id=client.id)
def import_uploads_local(import_dir, processing_avatars=False):
# type: (Path, bool) -> None
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename) as records_file:
records = ujson.loads(records_file.read())
for record in records:
if processing_avatars:
# For avatars, we need to rehash the user ID with the
# new server's avatar salt
avatar_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path)
if record['s3_path'].endswith('.original'):
file_path += '.original'
else:
file_path += '.png'
else:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", record['s3_path'])
orig_file_path = os.path.join(import_dir, record['path'])
if not os.path.exists(os.path.dirname(file_path)):
subprocess.check_call(["mkdir", "-p", os.path.dirname(file_path)])
shutil.copy(orig_file_path, file_path)
def import_uploads_s3(bucket_name, import_dir, processing_avatars=False):
# type: (str, Path, bool) -> None
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = conn.get_bucket(bucket_name, validate=True)
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename) as records_file:
records = ujson.loads(records_file.read())
for record in records:
key = Key(bucket)
if processing_avatars:
            # For avatars, we need to rehash the user ID with the
            # new server's avatar salt
avatar_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
key.key = avatar_path
if record['s3_path'].endswith('.original'):
key.key += '.original'
else:
key.key = record['s3_path']
user_profile_id = int(record['user_profile_id'])
# Support email gateway bot and other cross-realm messages
if user_profile_id in id_maps["user_profile"]:
logging.info("Uploaded by ID mapped user: %s!" % (user_profile_id,))
user_profile_id = id_maps["user_profile"][user_profile_id]
user_profile = get_user_profile_by_id(user_profile_id)
key.set_metadata("user_profile_id", str(user_profile.id))
key.set_metadata("realm_id", str(user_profile.realm_id))
key.set_metadata("orig_last_modified", record['last_modified'])
headers = {u'Content-Type': record['content_type']}
key.set_contents_from_filename(os.path.join(import_dir, record['path']), headers=headers)
def import_uploads(import_dir, processing_avatars=False):
# type: (Path, bool) -> None
if processing_avatars:
logging.info("Importing avatars")
else:
logging.info("Importing uploaded files")
if settings.LOCAL_UPLOADS_DIR:
import_uploads_local(import_dir, processing_avatars=processing_avatars)
else:
if processing_avatars:
bucket_name = settings.S3_AVATAR_BUCKET
else:
bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
import_uploads_s3(bucket_name, import_dir, processing_avatars=processing_avatars)
# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly. Here is a correct order.
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and Foreign Keys) to do the import correctly.
def do_import_realm(import_dir):
# type: (Path) -> None
logging.info("Importing realm dump %s" % (import_dir,))
if not os.path.exists(import_dir):
raise Exception("Missing import directory!")
realm_data_filename = os.path.join(import_dir, "realm.json")
if not os.path.exists(realm_data_filename):
raise Exception("Missing realm.json file!")
logging.info("Importing realm data from %s" % (realm_data_filename,))
with open(realm_data_filename) as f:
data = ujson.load(f)
convert_to_id_fields(data, 'zerver_realm', 'notifications_stream')
fix_datetime_fields(data, 'zerver_realm')
fix_realm_authentication_bitfield(data, 'zerver_realm', 'authentication_methods')
realm = Realm(**data['zerver_realm'][0])
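    # Realm.notifications_stream points at a Stream, but Streams point back
    # at the Realm, so save the realm without it first and restore the id
    # after the streams have been imported below.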
if realm.notifications_stream_id is not None:
notifications_stream_id = int(realm.notifications_stream_id) # type: Optional[int]
else:
notifications_stream_id = None
realm.notifications_stream_id = None
realm.save()
bulk_import_client(data, Client, 'zerver_client')
# Email tokens will automatically be randomly generated when the
# Stream objects are created by Django.
fix_datetime_fields(data, 'zerver_stream')
convert_to_id_fields(data, 'zerver_stream', 'realm')
bulk_import_model(data, Stream, 'zerver_stream')
realm.notifications_stream_id = notifications_stream_id
realm.save()
convert_to_id_fields(data, "zerver_defaultstream", 'stream')
for (table, model) in realm_tables:
convert_to_id_fields(data, table, 'realm')
bulk_import_model(data, model, table)
# Remap the user IDs for notification_bot and friends to their
# appropriate IDs on this server
for item in data['zerver_userprofile_crossrealm']:
logging.info("Adding to ID map: %s %s" % (item['id'], get_system_bot(item['email']).id))
new_user_id = get_system_bot(item['email']).id
update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
# Merge in zerver_userprofile_mirrordummy
data['zerver_userprofile'] = data['zerver_userprofile'] + data['zerver_userprofile_mirrordummy']
del data['zerver_userprofile_mirrordummy']
data['zerver_userprofile'].sort(key=lambda r: r['id'])
fix_datetime_fields(data, 'zerver_userprofile')
convert_to_id_fields(data, 'zerver_userprofile', 'realm')
re_map_foreign_keys(data, 'zerver_userprofile', 'bot_owner', related_table="user_profile")
convert_to_id_fields(data, 'zerver_userprofile', 'default_sending_stream')
convert_to_id_fields(data, 'zerver_userprofile', 'default_events_register_stream')
for user_profile_dict in data['zerver_userprofile']:
user_profile_dict['password'] = None
user_profile_dict['api_key'] = random_api_key()
# Since Zulip doesn't use these permissions, drop them
del user_profile_dict['user_permissions']
del user_profile_dict['groups']
user_profiles = [UserProfile(**item) for item in data['zerver_userprofile']]
for user_profile in user_profiles:
user_profile.set_unusable_password()
UserProfile.objects.bulk_create(user_profiles)
if 'zerver_huddle' in data:
bulk_import_model(data, Huddle, 'zerver_huddle')
bulk_import_model(data, Recipient, 'zerver_recipient')
re_map_foreign_keys(data, 'zerver_subscription', 'user_profile', related_table="user_profile")
convert_to_id_fields(data, 'zerver_subscription', 'recipient')
bulk_import_model(data, Subscription, 'zerver_subscription')
fix_datetime_fields(data, 'zerver_userpresence')
re_map_foreign_keys(data, 'zerver_userpresence', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userpresence', 'client', related_table='client')
bulk_import_model(data, UserPresence, 'zerver_userpresence')
fix_datetime_fields(data, 'zerver_useractivity')
re_map_foreign_keys(data, 'zerver_useractivity', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_useractivity', 'client', related_table='client')
bulk_import_model(data, UserActivity, 'zerver_useractivity')
fix_datetime_fields(data, 'zerver_useractivityinterval')
re_map_foreign_keys(data, 'zerver_useractivityinterval', 'user_profile', related_table="user_profile")
bulk_import_model(data, UserActivityInterval, 'zerver_useractivityinterval')
# Import uploaded files and avatars
import_uploads(os.path.join(import_dir, "avatars"), processing_avatars=True)
import_uploads(os.path.join(import_dir, "uploads"))
# Import zerver_message and zerver_usermessage
import_message_data(import_dir)
# Do attachments AFTER message data is loaded.
# TODO: de-dup how we read these json files.
fn = os.path.join(import_dir, "attachment.json")
if not os.path.exists(fn):
raise Exception("Missing attachment.json file!")
logging.info("Importing attachment data from %s" % (fn,))
with open(fn) as f:
data = ujson.load(f)
import_attachments(data)
def import_message_data(import_dir):
# type: (Path) -> None
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, "messages-%06d.json" % (dump_file_id,))
if not os.path.exists(message_filename):
break
with open(message_filename) as f:
data = ujson.load(f)
logging.info("Importing message dump %s" % (message_filename,))
re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
convert_to_id_fields(data, 'zerver_message', 'recipient')
re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
fix_datetime_fields(data, 'zerver_message')
bulk_import_model(data, Message, 'zerver_message')
# Due to the structure of these message chunks, we're
# guaranteed to have already imported all the Message objects
# for this batch of UserMessage objects.
convert_to_id_fields(data, 'zerver_usermessage', 'message')
re_map_foreign_keys(data, 'zerver_usermessage', 'user_profile', related_table="user_profile")
fix_bitfield_keys(data, 'zerver_usermessage', 'flags')
bulk_import_model(data, UserMessage, 'zerver_usermessage')
dump_file_id += 1
def import_attachments(data):
# type: (TableData) -> None
# Clean up the data in zerver_attachment that is not
# relevant to our many-to-many import.
fix_datetime_fields(data, 'zerver_attachment')
re_map_foreign_keys(data, 'zerver_attachment', 'owner', related_table="user_profile")
convert_to_id_fields(data, 'zerver_attachment', 'realm')
# Configure ourselves. Django models many-to-many (m2m)
# relations asymmetrically. The parent here refers to the
# Model that has the ManyToManyField. It is assumed here
# the child models have been loaded, but we are in turn
# responsible for loading the parents and the m2m rows.
parent_model = Attachment
parent_db_table_name = 'zerver_attachment'
parent_singular = 'attachment'
child_singular = 'message'
child_plural = 'messages'
m2m_table_name = 'zerver_attachment_messages'
parent_id = 'attachment_id'
child_id = 'message_id'
# First, build our list of many-to-many (m2m) rows.
# We do this in a slightly convoluted way to anticipate
# a future where we may need to call re_map_foreign_keys.
m2m_rows = [] # type: List[Record]
for parent_row in data[parent_db_table_name]:
for fk_id in parent_row[child_plural]:
m2m_row = {} # type: Record
m2m_row[parent_singular] = parent_row['id']
m2m_row[child_singular] = fk_id
m2m_rows.append(m2m_row)
# Create our table data for insert.
m2m_data = {m2m_table_name: m2m_rows} # type: TableData
convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
m2m_rows = m2m_data[m2m_table_name]
# Next, delete out our child data from the parent rows.
for parent_row in data[parent_db_table_name]:
del parent_row[child_plural]
# Next, load the parent rows.
bulk_import_model(data, parent_model, parent_db_table_name)
# Now, go back to our m2m rows.
    # TODO: Do this the kosher Django way. We may find a better
    # way to do this in Django 1.9 or later.
with connection.cursor() as cursor:
sql_template = '''
insert into %s (%s, %s) values(%%s, %%s);''' % (m2m_table_name,
parent_id,
child_id)
tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
cursor.executemany(sql_template, tups)
logging.info('Successfully imported M2M table %s' % (m2m_table_name,))
| 38.73222 | 182 | 0.660674 |
56704bd0d9931b502a0376f90959f4b091d0cd0e | 2,282 | py | Python | code_icc/archs/cluster/net5g_two_head.py | ThmCuong/IIC-Python3 | 5a02b40ffa07b159fa7e89cf5b4ed781f4798ff1 | [
"MIT"
] | null | null | null | code_icc/archs/cluster/net5g_two_head.py | ThmCuong/IIC-Python3 | 5a02b40ffa07b159fa7e89cf5b4ed781f4798ff1 | [
"MIT"
] | null | null | null | code_icc/archs/cluster/net5g_two_head.py | ThmCuong/IIC-Python3 | 5a02b40ffa07b159fa7e89cf5b4ed781f4798ff1 | [
"MIT"
] | null | null | null | import torch.nn as nn
from code_icc.archs.cluster.net5g import ClusterNet5gTrunk
from code_icc.archs.cluster.residual import BasicBlock, ResNet
# resnet34 and full channels
__all__ = ["ClusterNet5gTwoHead"]
class ClusterNet5gTwoHeadHead(nn.Module):
def __init__(self, config, output_k, semisup=False):
super(ClusterNet5gTwoHeadHead, self).__init__()
self.batchnorm_track = config.batchnorm_track
self.semisup = semisup
if not semisup:
self.num_sub_heads = config.num_sub_heads
self.heads = nn.ModuleList([nn.Sequential(
nn.Linear(512 * BasicBlock.expansion, output_k),
        nn.Softmax(dim=1)) for _ in range(self.num_sub_heads)])
else:
self.head = nn.Linear(512 * BasicBlock.expansion, output_k)
def forward(self, x, kmeans_use_features=False):
if not self.semisup:
results = []
      for i in range(self.num_sub_heads):
if kmeans_use_features:
results.append(x) # duplicates
else:
results.append(self.heads[i](x))
return results
else:
return self.head(x)
class ClusterNet5gTwoHead(ResNet):
def __init__(self, config):
# no saving of configs
super(ClusterNet5gTwoHead, self).__init__()
self.batchnorm_track = config.batchnorm_track
self.trunk = ClusterNet5gTrunk(config)
self.head_A = ClusterNet5gTwoHeadHead(config, output_k=config.output_k_A)
semisup = (hasattr(config, "semisup") and
config.semisup)
print("semisup: %s" % semisup)
self.head_B = ClusterNet5gTwoHeadHead(config, output_k=config.output_k_B,
semisup=semisup)
self._initialize_weights()
def forward(self, x, head="B", kmeans_use_features=False,
trunk_features=False,
penultimate_features=False):
# default is "B" for use by eval code
# training script switches between A and B
x = self.trunk(x, penultimate_features=penultimate_features)
if trunk_features: # for semisup
return x
# returns list or single
if head == "A":
x = self.head_A(x, kmeans_use_features=kmeans_use_features)
elif head == "B":
x = self.head_B(x, kmeans_use_features=kmeans_use_features)
else:
assert (False)
return x
| 27.829268 | 77 | 0.677038 |
1cf12b2525aebd5fac1aad5b1a56e738d51d957a | 1,667 | py | Python | Huangqun-Weibo.py | Huangqun1998/Data-Crawler-Practice | b2691a18e3710b754b94df6383f6e25ec0a256c8 | [
"Apache-2.0"
] | 1 | 2021-10-05T05:52:39.000Z | 2021-10-05T05:52:39.000Z | Huangqun-Weibo.py | Huangqun1998/Data-Crawler-Practice | b2691a18e3710b754b94df6383f6e25ec0a256c8 | [
"Apache-2.0"
] | null | null | null | Huangqun-Weibo.py | Huangqun1998/Data-Crawler-Practice | b2691a18e3710b754b94df6383f6e25ec0a256c8 | [
"Apache-2.0"
] | 7 | 2020-08-09T09:52:15.000Z | 2020-08-16T08:04:02.000Z | import requests
import json
import pymysql
class Spider:
def __init__(self):
self.flag = True
self.url = 'https://m.weibo.cn/api/container/getIndex?is_all[]=1%3Fis_all%3D1&is_all[]=1&jumpfrom=weibocom&type=uid&value=3604378011&containerid=1076033604378011'
self.headers = {
"User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Mobile Safari/537.36"
}
def parse_url(self, url):
        response = requests.get(url, headers=self.headers)
html_str = response.content.decode()
return html_str
def get_content_list(self, html_str):
res_dict = json.loads(html_str)
        if res_dict['ok'] != 1:
            # ok != 1 means there is no more data (or the request failed);
            # stop the crawl loop and return nothing new.
            self.flag = False
            return [], self.url
since_id = res_dict['data']['cardlistInfo']['since_id']
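        # since_id is the paging cursor returned by the API; it is appended
        # to the next request URL below to fetch the next page of posts.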
cards = res_dict['data']['cards']
content_list = []
for card in cards:
if card['card_type'] == 9:
text = card['mblog']['text']
content_list.append(text)
next_url = "https://m.weibo.cn/api/container/getIndex?is_all[]=1%3Fis_all%3D1&is_all[]=1&jumpfrom=weibocom&type=uid&value=3604378011&containerid=1076033604378011&since_id={}".format(since_id)
return content_list,next_url
def run(self):
content = []
while self.flag:
html_str = self.parse_url(self.url)
content_list,next_url = self.get_content_list(html_str)
self.url = next_url
content.extend(content_list)
print(content,'\n')
if __name__ == "__main__":
weibo = Spider()
weibo.run()
| 34.729167 | 199 | 0.613677 |