org_text (string, lengths 761-968k) | texts (sequence) | scores (sequence) | num_lines (int64, 1-25.7k) | avg_score (float64, 0-0.31) |
---|---|---|---|---|
#!/usr/bin/env python
""" patrol_smach.py - Version 1.0 2013-04-12
Control a robot to patrol a square area using SMACH
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import actionlib  # GoalStatus is referenced below in move_base_result_cb
from smach import State, StateMachine
from smach_ros import SimpleActionState, IntrospectionServer
from geometry_msgs.msg import Twist
from rbx2_tasks.task_setup import *
class Patrol():
def __init__(self):
rospy.init_node('patrol_smach', anonymous=False)
# Set the shutdown function (stop the robot)
rospy.on_shutdown(self.shutdown)
# Initialize a number of parameters and variables
setup_task_environment(self)
# Track success rate of getting to the goal locations
self.n_succeeded = 0
self.n_aborted = 0
self.n_preempted = 0
# A list to hold the navigation waypoints
nav_states = list()
# Turn the waypoints into SMACH states
for waypoint in self.waypoints:
nav_goal = MoveBaseGoal()
nav_goal.target_pose.header.frame_id = 'base_footprint'
nav_goal.target_pose.pose = waypoint
move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,
exec_timeout=rospy.Duration(10.0),
server_wait_timeout=rospy.Duration(10.0))
nav_states.append(move_base_state)
# Initialize the patrol state machine
self.sm_patrol = StateMachine(outcomes=['succeeded','aborted','preempted'])
# Add the states to the state machine with the appropriate transitions
with self.sm_patrol:
StateMachine.add('NAV_STATE_0', nav_states[0], transitions={'succeeded':'NAV_STATE_1','aborted':'NAV_STATE_1','preempted':'NAV_STATE_1'})
StateMachine.add('NAV_STATE_1', nav_states[1], transitions={'succeeded':'NAV_STATE_2','aborted':'NAV_STATE_2','preempted':'NAV_STATE_2'})
StateMachine.add('NAV_STATE_2', nav_states[2], transitions={'succeeded':'NAV_STATE_3','aborted':'NAV_STATE_3','preempted':'NAV_STATE_3'})
StateMachine.add('NAV_STATE_3', nav_states[3], transitions={'succeeded':'NAV_STATE_4','aborted':'NAV_STATE_4','preempted':'NAV_STATE_4'})
StateMachine.add('NAV_STATE_4', nav_states[0], transitions={'succeeded':'','aborted':'','preempted':''})
# Create and start the SMACH introspection server
intro_server = IntrospectionServer('patrol', self.sm_patrol, '/SM_ROOT')
intro_server.start()
# Execute the state machine for the specified number of patrols
while (self.n_patrols == -1 or self.patrol_count < self.n_patrols) and not rospy.is_shutdown():
sm_outcome = self.sm_patrol.execute()
self.patrol_count += 1
rospy.loginfo("FINISHED PATROL LOOP: " + str(self.patrol_count))
rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))
intro_server.stop()
def move_base_result_cb(self, userdata, status, result):
if status == actionlib.GoalStatus.SUCCEEDED:
self.n_succeeded += 1
elif status == actionlib.GoalStatus.ABORTED:
self.n_aborted += 1
elif status == actionlib.GoalStatus.PREEMPTED:
self.n_preempted += 1
try:
rospy.loginfo("Success rate: " + str(100.0 * self.n_succeeded / (self.n_succeeded + self.n_aborted + self.n_preempted)))
except:
pass
def shutdown(self):
rospy.loginfo("Stopping the robot...")
self.sm_patrol.request_preempt()
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
Patrol()
except rospy.ROSInterruptException:
rospy.loginfo("SMACH test finished.")
| [
"#!/usr/bin/env python\n",
"\n",
"\"\"\" patrol_smach.py - Version 1.0 2013-04-12\n",
"\n",
" Control a robot to patrol a square area using SMACH\n",
"\n",
" Created for the Pi Robot Project: http://www.pirobot.org\n",
" Copyright (c) 2013 Patrick Goebel. All rights reserved.\n",
"\n",
" This program is free software; you can redistribute it and/or modify\n",
" it under the terms of the GNU General Public License as published by\n",
" the Free Software Foundation; either version 2 of the License, or\n",
" (at your option) any later version.5\n",
" \n",
" This program is distributed in the hope that it will be useful,\n",
" but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
" GNU General Public License for more details at:\n",
" \n",
" http://www.gnu.org/licenses/gpl.htmlPoint\n",
" \n",
"\"\"\"\n",
"\n",
"import rospy\n",
"from smach import State, StateMachine\n",
"from smach_ros import SimpleActionState, IntrospectionServer\n",
"from geometry_msgs.msg import Twist\n",
"from rbx2_tasks.task_setup import *\n",
"\n",
"class Patrol():\n",
" def __init__(self):\n",
" rospy.init_node('patrol_smach', anonymous=False)\n",
" \n",
" # Set the shutdown function (stop the robot)\n",
" rospy.on_shutdown(self.shutdown)\n",
" \n",
" # Initialize a number of parameters and variables\n",
" setup_task_environment(self)\n",
" \n",
" # Track success rate of getting to the goal locations\n",
" self.n_succeeded = 0\n",
" self.n_aborted = 0\n",
" self.n_preempted = 0\n",
"\n",
" # A list to hold then navigation waypoints\n",
" nav_states = list()\n",
" \n",
" # Turn the waypoints into SMACH states\n",
" for waypoint in self.waypoints: \n",
" nav_goal = MoveBaseGoal()\n",
" nav_goal.target_pose.header.frame_id = 'base_footprint'\n",
" nav_goal.target_pose.pose = waypoint\n",
" move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,\n",
" exec_timeout=rospy.Duration(10.0),\n",
" server_wait_timeout=rospy.Duration(10.0))\n",
" nav_states.append(move_base_state)\n",
" \n",
" # Initialize the patrol state machine\n",
" self.sm_patrol = StateMachine(outcomes=['succeeded','aborted','preempted'])\n",
"\n",
" # Add the states to the state machine with the appropriate transitions\n",
" with self.sm_patrol: \n",
" StateMachine.add('NAV_STATE_0', nav_states[0], transitions={'succeeded':'NAV_STATE_1','aborted':'NAV_STATE_1','preempted':'NAV_STATE_1'})\n",
" StateMachine.add('NAV_STATE_1', nav_states[1], transitions={'succeeded':'NAV_STATE_2','aborted':'NAV_STATE_2','preempted':'NAV_STATE_2'})\n",
" StateMachine.add('NAV_STATE_2', nav_states[2], transitions={'succeeded':'NAV_STATE_3','aborted':'NAV_STATE_3','preempted':'NAV_STATE_3'})\n",
" StateMachine.add('NAV_STATE_3', nav_states[3], transitions={'succeeded':'NAV_STATE_4','aborted':'NAV_STATE_4','preempted':'NAV_STATE_4'})\n",
" StateMachine.add('NAV_STATE_4', nav_states[0], transitions={'succeeded':'','aborted':'','preempted':''})\n",
" \n",
" # Create and start the SMACH introspection server\n",
" intro_server = IntrospectionServer('patrol', self.sm_patrol, '/SM_ROOT')\n",
" intro_server.start()\n",
" \n",
" # Execute the state machine for the specified number of patrols\n",
" while (self.n_patrols == -1 or self.patrol_count < self.n_patrols) and not rospy.is_shutdown():\n",
" sm_outcome = self.sm_patrol.execute()\n",
" self.patrol_count += 1\n",
" rospy.loginfo(\"FINISHED PATROL LOOP: \" + str(self.patrol_count))\n",
" \n",
" rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))\n",
" \n",
" intro_server.stop()\n",
" \n",
" def move_base_result_cb(self, userdata, status, result):\n",
" if status == actionlib.GoalStatus.SUCCEEDED:\n",
" self.n_succeeded += 1\n",
" elif status == actionlib.GoalStatus.ABORTED:\n",
" self.n_aborted += 1\n",
" elif status == actionlib.GoalStatus.PREEMPTED:\n",
" self.n_preempted += 1\n",
"\n",
" try:\n",
" rospy.loginfo(\"Success rate: \" + str(100.0 * self.n_succeeded / (self.n_succeeded + self.n_aborted + self.n_preempted)))\n",
" except:\n",
" pass\n",
"\n",
" def shutdown(self):\n",
" rospy.loginfo(\"Stopping the robot...\")\n",
" \n",
" self.sm_patrol.request_preempt()\n",
" \n",
" self.cmd_vel_pub.publish(Twist())\n",
" \n",
" rospy.sleep(1)\n",
"\n",
"if __name__ == '__main__':\n",
" try:\n",
" Patrol()\n",
" except rospy.ROSInterruptException:\n",
" rospy.loginfo(\"SMACH test finished.\")\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0.2,
0,
0.14285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.0196078431372549,
0,
0,
0,
0.0078125,
0.023809523809523808,
0.02197802197802198,
0,
0.1111111111111111,
0,
0.03571428571428571,
0,
0,
0.024390243902439025,
0.04,
0.04,
0.04,
0.04,
0.05128205128205128,
0.07692307692307693,
0,
0.012345679012345678,
0,
0.1111111111111111,
0,
0.009615384615384616,
0,
0,
0,
0.1111111111111111,
0,
0.058823529411764705,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0.0625,
0,
0,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0,
0.037037037037037035,
0,
0,
0,
0
] | 109 | 0.022425 |
from __future__ import division, print_function, absolute_import
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
import tflearn.data_utils as du
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
import matplotlib.pyplot as plt
import matplotlib as matplot
import seaborn as sns
import random
trainx = pd.read_csv("/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTrainImages 13440x1024.csv",header=None)
trainy = pd.read_csv("/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTrainLabel 13440x1.csv",header=None)
testx = pd.read_csv("/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTestImages 3360x1024.csv",header=None)
testy = pd.read_csv("/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTestLabel 3360x1.csv",header=None)
# Split data into training set and validation set
#training images
trainx = trainx.values.astype('float32')
#training labels
trainy = trainy.values.astype('int32')-1
#testing images
testx = testx.values.astype('float32')
#testing labels
testy = testy.values.astype('int32')-1
original_trainy = trainy
#One Hot encoding of train labels.
trainy = to_categorical(trainy[:,0],28)
original_testy = testy
#One Hot encoding of test labels.
testy = to_categorical(testy[:,0],28)
# reshape input images to 32x32x1
trainx = trainx.reshape([-1, 32, 32, 1])
testx = testx.reshape([-1, 32, 32, 1])
arabic_labels = ['alef', 'beh', 'teh', 'theh', 'jeem', 'hah', 'khah', 'dal', 'thal',
'reh', 'zain', 'seen', 'sheen', 'sad', 'dad', 'tah', 'zah', 'ain',
'ghain', 'feh', 'qaf', 'kaf', 'lam', 'meem', 'noon', 'heh', 'waw', 'yeh']
#size of images should be 1200 by 2300
#@return - trainy_new: List of tuples that represent the corners of the 32x32 character box in clockwise order starting from top left
def get_image_batch(trainx, num_images):
pad_left = random.randint(5,2265)
pad_right = 2300-32-pad_left
pad_top = random.randint(5,1165)
pad_bottom = 1200-32-pad_top
trainx_new = np.empty((num_images, pad_left + pad_right + 32, pad_top + pad_bottom + 32, 1))
for i in range(num_images):
index = random.randint(0 ,len(trainx)-1)
trainx_new[i] = np.pad(trainx[index], ((pad_left, pad_right), (pad_top, pad_bottom), (0, 0)), 'constant')
trainy_new = [(pad_left,pad_top),(2300-pad_right,pad_top),(2300-pad_right,1200-pad_bottom),(pad_left,1200-pad_bottom)]
return trainx_new, trainy_new
for i in range(10):
images, labels = get_image_batch(trainx,1)
plt.imshow(images[0].squeeze().T)
print(labels)
plt.show()
#x = random.randint(0, 13440)
#plt.imshow(trainx_new[x].squeeze().T)
#plt.title(arabic_labels[original_trainy[x][0]])
#plt.show()
#Zero center every sample with specified mean. If not specified, the mean is evaluated over all samples.
trainx, mean1 = du.featurewise_zero_center(trainx)
testx, mean2 = du.featurewise_zero_center(testx)
print(trainx.shape, trainy.shape, testx.shape, testy.shape)
# Building convolutional network
network = input_data(shape=[None, 32, 32, 1], name='input')
network = conv_2d(network, 80, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 1024, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 28, activation='softmax')
network = regression(network, optimizer='sgd', learning_rate=0.01,
loss='categorical_crossentropy', name='target')
#model compile
model = tflearn.DNN(network, tensorboard_verbose=0)
#model fitting
scores = []
for i in range(100):
model.fit({'input': trainx}, {'target': trainy}, n_epoch=1,
validation_set=({'input': testx}, {'target': testy}),
snapshot_step=100, show_metric=True, run_id='convnet_arabic_digits')
score = model.evaluate(testx, testy)
print('Test accuracy: %0.2f%%' % (score[0] * 100))
scores.append(score[0]*100)
print(scores)
x = list(range(len(scores)))
y = []
for el in x:
y.append(el + 1)
plt.plot(y, scores, 'k-')
plt.title("Accuracy vs Epochs Trained")
plt.xlabel("Num Epochs")
plt.ylabel("Accuracy on Testing Data")
plt.grid()
plt.show(block=False)
plt.pause(.1)
plt.savefig('AccuracyGraph.pdf')
print(scores)
| [
"from __future__ import division, print_function, absolute_import\n",
"\n",
"import numpy as np # linear algebra\n",
"import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
"import tensorflow as tf\n",
"import tflearn\n",
"from tflearn.data_utils import to_categorical\n",
"import tflearn.data_utils as du\n",
"from tflearn.layers.core import input_data, dropout, fully_connected\n",
"from tflearn.layers.conv import conv_2d, max_pool_2d\n",
"from tflearn.layers.normalization import local_response_normalization\n",
"from tflearn.layers.estimator import regression\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib as matplot\n",
"import seaborn as sns\n",
"import random\n",
"\n",
"\n",
"trainx = pd.read_csv(\"/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTrainImages 13440x1024.csv\",header=None)\n",
"trainy = pd.read_csv(\"/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTrainLabel 13440x1.csv\",header=None)\n",
"\n",
"testx = pd.read_csv(\"/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTestImages 3360x1024.csv\",header=None)\n",
"testy = pd.read_csv(\"/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTestLabel 3360x1.csv\",header=None)\n",
"# Split data into training set and validation set\n",
"#training images\n",
"trainx = trainx.values.astype('float32')\n",
"#training labels\n",
"trainy = trainy.values.astype('int32')-1\n",
"#testing images\n",
"testx = testx.values.astype('float32')\n",
"#testing labels\n",
"testy = testy.values.astype('int32')-1\n",
"original_trainy = trainy\n",
"#One Hot encoding of train labels.\n",
"trainy = to_categorical(trainy[:,0],28)\n",
"original_testy = testy\n",
"#One Hot encoding of test labels.\n",
"testy = to_categorical(testy[:,0],28)\n",
"# reshape input images to 28x28x1\n",
"trainx = trainx.reshape([-1, 32, 32, 1])\n",
"testx = testx.reshape([-1, 32, 32, 1])\n",
"\n",
"arabic_labels = ['alef', 'beh', 'teh', 'theh', 'jeem', 'hah', 'khah', 'dal', 'thal',\n",
" 'reh', 'zain', 'seen', 'sheen', 'sad', 'dad', 'tah', 'zah', 'ain',\n",
" 'ghain', 'feh', 'qaf', 'kaf', 'lam', 'meem', 'noon', 'heh', 'waw', 'yeh']\n",
"\n",
"#size of images should be 1200 by 2300\n",
"\n",
"\n",
"#@return - trainy_new: List of tuples that represent the corners of the 32x32 character box in clockwise order starting from top left\n",
"def get_image_batch(trainx, num_images):\n",
" pad_left = random.randint(5,2265)\n",
" pad_right = 2300-32-pad_left\n",
" pad_top = random.randint(5,1165)\n",
" pad_bottom = 1200-32-pad_top\n",
" trainx_new = np.empty((num_images, pad_left + pad_right + 32, pad_top + pad_bottom + 32, 1))\n",
" for i in range(num_images):\n",
" index = random.randint(0 ,len(trainx)-1)\n",
" trainx_new[i] = np.pad(trainx[index], ((pad_left, pad_right), (pad_top, pad_bottom), (0, 0)), 'constant')\n",
" trainy_new = [(pad_left,pad_top),(2300-pad_right,pad_top),(2300-pad_right,1200-pad_bottom),(pad_left,1200-pad_bottom)]\n",
" return trainx_new, trainy_new\n",
"\n",
"\n",
"\n",
"for i in range(10):\n",
" images, labels = get_image_batch(trainx,1)\n",
" plt.imshow(images[0].squeeze().T)\n",
" print(labels)\n",
" plt.show()\n",
"\n",
"#x = random.randint(0, 13440)\n",
"#plt.imshow(trainx_new[x].squeeze().T)\n",
"#plt.title(arabic_labels[original_trainy[x][0]])\n",
"#plt.show()\n",
"\n",
"#Zero center every sample with specified mean. If not specified, the mean is evaluated over all samples.\n",
"trainx, mean1 = du.featurewise_zero_center(trainx)\n",
"testx, mean2 = du.featurewise_zero_center(testx)\n",
"\n",
"print(trainx.shape, trainy.shape, testx.shape, testy.shape)\n",
"\n",
"# Building convolutional network\n",
"network = input_data(shape=[None, 32, 32, 1], name='input')\n",
"network = conv_2d(network, 80, 3, activation='relu', regularizer=\"L2\")\n",
"network = max_pool_2d(network, 2)\n",
"network = local_response_normalization(network)\n",
"network = conv_2d(network, 64, 3, activation='relu', regularizer=\"L2\")\n",
"network = max_pool_2d(network, 2)\n",
"network = local_response_normalization(network)\n",
"network = fully_connected(network, 1024, activation='relu')\n",
"network = dropout(network, 0.8)\n",
"network = fully_connected(network, 512, activation='relu')\n",
"network = dropout(network, 0.8)\n",
"network = fully_connected(network, 28, activation='softmax')\n",
"network = regression(network, optimizer='sgd', learning_rate=0.01,\n",
" loss='categorical_crossentropy', name='target')\n",
"\n",
"#model complile\n",
"model = tflearn.DNN(network, tensorboard_verbose=0)\n",
"#model fitting\n",
"scores = []\n",
"\n",
"for i in range(100):\n",
" model.fit({'input': trainx}, {'target': trainy}, n_epoch=1,\n",
" validation_set=({'input': testx}, {'target': testy}),\n",
" snapshot_step=100, show_metric=True, run_id='convnet_arabic_digits')\n",
" score = model.evaluate(testx, testy)\n",
" print('Test accuracy: %0.2f%%' % (score[0] * 100))\n",
" scores.append(score[0]*100)\n",
" print(scores)\n",
" x = list(range(len(scores)))\n",
" y = []\n",
" for el in x:\n",
" y.append(el + 1)\n",
" plt.plot(y, scores, 'k-')\n",
" plt.title(\"Accuracy vs Epochs Trained\")\n",
" plt.xlabel(\"Num Epochs\")\n",
" plt.ylabel(\"Accuracy on Testing Data\")\n",
" plt.grid()\n",
" plt.show(block=False)\n",
" plt.pause(.1)\n",
"\n",
"plt.savefig('AccuracyGraph.pdf')\n",
"\n",
"print(scores)\n"
] | [
0,
0,
0.027777777777777776,
0.014084507042253521,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.012658227848101266,
0,
0.012578616352201259,
0.012903225806451613,
0,
0.058823529411764705,
0,
0.058823529411764705,
0,
0.0625,
0,
0.0625,
0,
0,
0.02857142857142857,
0.05,
0,
0.029411764705882353,
0.05263157894736842,
0,
0,
0,
0,
0.011764705882352941,
0.024096385542168676,
0.022222222222222223,
0,
0.02564102564102564,
0,
0,
0.014925373134328358,
0,
0.02631578947368421,
0,
0.02702702702702703,
0,
0.010309278350515464,
0,
0.04081632653061224,
0.008771929824561403,
0.06504065040650407,
0,
0,
0,
0,
0.05,
0.02127659574468085,
0,
0,
0,
0,
0.03333333333333333,
0.02564102564102564,
0.02040816326530612,
0.08333333333333333,
0,
0.01904761904761905,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0.06666666666666667,
0,
0,
0,
0,
0.015384615384615385,
0.0125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 125 | 0.009461 |
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""cylc [info] show [OPTIONS] ARGS
Query a running workflow for:
cylc show REG - workflow metadata
cylc show REG TASK_NAME - task metadata
cylc show REG TASK_GLOB - prerequisites and outputs of task instances
Prerequisite and output status is indicated for current active tasks.
"""
import json
import sys
from ansimarkup import ansiprint
from cylc.flow import ID_DELIM
from cylc.flow.option_parsers import CylcOptionParser as COP
from cylc.flow.network.client import SuiteRuntimeClient
from cylc.flow.task_id import TaskID
from cylc.flow.terminal import cli_function
WORKFLOW_META_QUERY = '''
query ($wFlows: [ID]!) {
workflows (ids: $wFlows, stripNull: false) {
meta {
title
description
URL
userDefined
}
}
}
'''
TASK_META_QUERY = '''
query ($wFlows: [ID]!, $taskIds: [ID]) {
tasks (workflows: $wFlows, ids: $taskIds, stripNull: false) {
name
meta {
title
description
URL
userDefined
}
}
}
'''
TASK_PREREQS_QUERY = '''
query ($wFlows: [ID]!, $taskIds: [ID]) {
taskProxies (workflows: $wFlows, ids: $taskIds, stripNull: false) {
name
cyclePoint
task {
meta {
title
description
URL
userDefined
}
}
prerequisites {
expression
conditions {
exprAlias
taskId
reqState
message
satisfied
}
satisfied
}
outputs
extras
}
}
'''
def print_msg_state(msg, state):
if state:
ansiprint(f'<green> + {msg}</green>')
else:
ansiprint(f'<red> - {msg}</red>')
def flatten_data(data, flat_data=None):
if flat_data is None:
flat_data = {}
for key, value in data.items():
if isinstance(value, dict):
flatten_data(value, flat_data)
elif isinstance(value, list):
for member in value:
flatten_data(member, flat_data)
else:
flat_data[key] = value
return flat_data
def get_option_parser():
parser = COP(
__doc__, comms=True, multitask=True,
argdoc=[
('REG', 'Suite name'),
('[TASK_NAME or TASK_GLOB ...]', 'Task names or match patterns')])
parser.add_option('--list-prereqs', action="store_true", default=False,
help="Print a task's pre-requisites as a list.")
parser.add_option('--json', action="store_true", default=False,
help="Print output in JSON format.")
return parser
@cli_function(get_option_parser)
def main(_, options, suite, *task_args):
"""Implement "cylc show" CLI."""
pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)
json_filter = {}
if not task_args:
query = WORKFLOW_META_QUERY
query_kwargs = {
'request_string': query,
'variables': {'wFlows': [suite]}
}
# Print suite info.
results = pclient('graphql', query_kwargs)
for workflow in results['workflows']:
flat_data = flatten_data(workflow)
if options.json:
json_filter.update(flat_data)
else:
for key, value in sorted(flat_data.items(), reverse=True):
ansiprint(
f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')
task_names = [arg for arg in task_args if TaskID.is_valid_name(arg)]
task_ids = [arg for arg in task_args if TaskID.is_valid_id_2(arg)]
if task_names:
tasks_query = TASK_META_QUERY
tasks_kwargs = {
'request_string': tasks_query,
'variables': {'wFlows': [suite], 'taskIds': task_names}
}
# Print suite info.
results = pclient('graphql', tasks_kwargs)
multi = len(results['tasks']) > 1
for task in results['tasks']:
flat_data = flatten_data(task['meta'])
if options.json:
json_filter.update({task['name']: flat_data})
else:
if multi:
print(f'----\nTASK NAME: {task["name"]}')
for key, value in sorted(flat_data.items(), reverse=True):
ansiprint(
f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')
if task_ids:
tp_query = TASK_PREREQS_QUERY
tp_kwargs = {
'request_string': tp_query,
'variables': {
'wFlows': [suite],
'taskIds': [
f'{c}{ID_DELIM}{n}'
for n, c in [
TaskID.split(t_id)
for t_id in task_ids
if TaskID.is_valid_id(t_id)
]
] + [
f'{c}{ID_DELIM}{n}'
for c, n in [
t_id.rsplit(TaskID.DELIM2, 1)
for t_id in task_ids
if not TaskID.is_valid_id(t_id)
]
]
}
}
results = pclient('graphql', tp_kwargs)
multi = len(results['taskProxies']) > 1
for t_proxy in results['taskProxies']:
task_id = TaskID.get(t_proxy['name'], t_proxy['cyclePoint'])
if options.json:
json_filter.update({task_id: t_proxy})
else:
if multi:
print(f'----\nTASK ID: {task_id}')
prereqs = []
for item in t_proxy['prerequisites']:
prefix = ''
multi_cond = len(item['conditions']) > 1
if multi_cond:
prereqs.append([
True,
'',
item['expression'].replace('c', ''),
item['satisfied']
])
for cond in item['conditions']:
if multi_cond and not options.list_prereqs:
prefix = f'\t{cond["exprAlias"].strip("c")} = '
_, _, point, name = cond['taskId'].split(ID_DELIM)
cond_id = TaskID.get(name, point)
prereqs.append([
False,
prefix,
f'{cond_id} {cond["reqState"]}',
cond['satisfied']
])
if options.list_prereqs:
for composite, _, msg, _ in prereqs:
if not composite:
print(msg)
else:
flat_meta = flatten_data(t_proxy['task']['meta'])
for key, value in sorted(flat_meta.items(), reverse=True):
ansiprint(
f'<bold>{key}:</bold>'
f' {value or "<m>(not given)</m>"}')
ansiprint(
'\n<bold>prerequisites</bold>'
' (<red>- => not satisfied</red>):')
if not prereqs:
print(' (None)')
for _, prefix, msg, state in prereqs:
print_msg_state(f'{prefix}{msg}', state)
ansiprint(
'\n<bold>outputs</bold>'
' (<red>- => not completed</red>):')
if not t_proxy['outputs']:
print(' (None)')
for key, val in t_proxy['outputs'].items():
print_msg_state(f'{task_id} {key}', val)
if t_proxy['extras']:
print('\nother:')
for key, value in t_proxy['extras'].items():
print(' o %s ... %s' % (key, value))
if not results['taskProxies']:
ansiprint(
f"<red>No matching tasks found: {task_ids}",
file=sys.stderr)
sys.exit(1)
if options.json:
print(json.dumps(json_filter, indent=4))
if __name__ == "__main__":
main()
| [
"#!/usr/bin/env python3\n",
"\n",
"# THIS FILE IS PART OF THE CYLC SUITE ENGINE.\n",
"# Copyright (C) NIWA & British Crown (Met Office) & Contributors.\n",
"#\n",
"# This program is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# This program is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with this program. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"\"\"\"cylc [info] show [OPTIONS] ARGS\n",
"\n",
"Query a running workflow for:\n",
" cylc show REG - workflow metadata\n",
" cylc show REG TASK_NAME - task metadata\n",
" cylc show REG TASK_GLOB - prerequisites and outputs of task instances\n",
"\n",
"Prerequisite and output status is indicated for current active tasks.\n",
"\"\"\"\n",
"\n",
"import json\n",
"import sys\n",
"\n",
"from ansimarkup import ansiprint\n",
"\n",
"from cylc.flow import ID_DELIM\n",
"from cylc.flow.option_parsers import CylcOptionParser as COP\n",
"from cylc.flow.network.client import SuiteRuntimeClient\n",
"from cylc.flow.task_id import TaskID\n",
"from cylc.flow.terminal import cli_function\n",
"\n",
"\n",
"WORKFLOW_META_QUERY = '''\n",
"query ($wFlows: [ID]!) {\n",
" workflows (ids: $wFlows, stripNull: false) {\n",
" meta {\n",
" title\n",
" description\n",
" URL\n",
" userDefined\n",
" }\n",
" }\n",
"}\n",
"'''\n",
"\n",
"TASK_META_QUERY = '''\n",
"query ($wFlows: [ID]!, $taskIds: [ID]) {\n",
" tasks (workflows: $wFlows, ids: $taskIds, stripNull: false) {\n",
" name\n",
" meta {\n",
" title\n",
" description\n",
" URL\n",
" userDefined\n",
" }\n",
" }\n",
"}\n",
"'''\n",
"\n",
"TASK_PREREQS_QUERY = '''\n",
"query ($wFlows: [ID]!, $taskIds: [ID]) {\n",
" taskProxies (workflows: $wFlows, ids: $taskIds, stripNull: false) {\n",
" name\n",
" cyclePoint\n",
" task {\n",
" meta {\n",
" title\n",
" description\n",
" URL\n",
" userDefined\n",
" }\n",
" }\n",
" prerequisites {\n",
" expression\n",
" conditions {\n",
" exprAlias\n",
" taskId\n",
" reqState\n",
" message\n",
" satisfied\n",
" }\n",
" satisfied\n",
" }\n",
" outputs\n",
" extras\n",
" }\n",
"}\n",
"'''\n",
"\n",
"\n",
"def print_msg_state(msg, state):\n",
" if state:\n",
" ansiprint(f'<green> + {msg}</green>')\n",
" else:\n",
" ansiprint(f'<red> - {msg}</red>')\n",
"\n",
"\n",
"def flatten_data(data, flat_data=None):\n",
" if flat_data is None:\n",
" flat_data = {}\n",
" for key, value in data.items():\n",
" if isinstance(value, dict):\n",
" flatten_data(value, flat_data)\n",
" elif isinstance(value, list):\n",
" for member in value:\n",
" flatten_data(member, flat_data)\n",
" else:\n",
" flat_data[key] = value\n",
" return flat_data\n",
"\n",
"\n",
"def get_option_parser():\n",
" parser = COP(\n",
" __doc__, comms=True, multitask=True,\n",
" argdoc=[\n",
" ('REG', 'Suite name'),\n",
" ('[TASK_NAME or TASK_GLOB ...]', 'Task names or match patterns')])\n",
"\n",
" parser.add_option('--list-prereqs', action=\"store_true\", default=False,\n",
" help=\"Print a task's pre-requisites as a list.\")\n",
"\n",
" parser.add_option('--json', action=\"store_true\", default=False,\n",
" help=\"Print output in JSON format.\")\n",
"\n",
" return parser\n",
"\n",
"\n",
"@cli_function(get_option_parser)\n",
"def main(_, options, suite, *task_args):\n",
" \"\"\"Implement \"cylc show\" CLI.\"\"\"\n",
" pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)\n",
" json_filter = {}\n",
"\n",
" if not task_args:\n",
" query = WORKFLOW_META_QUERY\n",
" query_kwargs = {\n",
" 'request_string': query,\n",
" 'variables': {'wFlows': [suite]}\n",
" }\n",
" # Print suite info.\n",
" results = pclient('graphql', query_kwargs)\n",
" for workflow in results['workflows']:\n",
" flat_data = flatten_data(workflow)\n",
" if options.json:\n",
" json_filter.update(flat_data)\n",
" else:\n",
" for key, value in sorted(flat_data.items(), reverse=True):\n",
" ansiprint(\n",
" f'<bold>{key}:</bold> {value or \"<m>(not given)</m>\"}')\n",
"\n",
" task_names = [arg for arg in task_args if TaskID.is_valid_name(arg)]\n",
" task_ids = [arg for arg in task_args if TaskID.is_valid_id_2(arg)]\n",
"\n",
" if task_names:\n",
" tasks_query = TASK_META_QUERY\n",
" tasks_kwargs = {\n",
" 'request_string': tasks_query,\n",
" 'variables': {'wFlows': [suite], 'taskIds': task_names}\n",
" }\n",
" # Print suite info.\n",
" results = pclient('graphql', tasks_kwargs)\n",
" multi = len(results['tasks']) > 1\n",
" for task in results['tasks']:\n",
" flat_data = flatten_data(task['meta'])\n",
" if options.json:\n",
" json_filter.update({task['name']: flat_data})\n",
" else:\n",
" if multi:\n",
" print(f'----\\nTASK NAME: {task[\"name\"]}')\n",
" for key, value in sorted(flat_data.items(), reverse=True):\n",
" ansiprint(\n",
" f'<bold>{key}:</bold> {value or \"<m>(not given)</m>\"}')\n",
"\n",
" if task_ids:\n",
" tp_query = TASK_PREREQS_QUERY\n",
" tp_kwargs = {\n",
" 'request_string': tp_query,\n",
" 'variables': {\n",
" 'wFlows': [suite],\n",
" 'taskIds': [\n",
" f'{c}{ID_DELIM}{n}'\n",
" for n, c in [\n",
" TaskID.split(t_id)\n",
" for t_id in task_ids\n",
" if TaskID.is_valid_id(t_id)\n",
" ]\n",
" ] + [\n",
" f'{c}{ID_DELIM}{n}'\n",
" for c, n in [\n",
" t_id.rsplit(TaskID.DELIM2, 1)\n",
" for t_id in task_ids\n",
" if not TaskID.is_valid_id(t_id)\n",
" ]\n",
" ]\n",
" }\n",
" }\n",
" results = pclient('graphql', tp_kwargs)\n",
" multi = len(results['taskProxies']) > 1\n",
" for t_proxy in results['taskProxies']:\n",
" task_id = TaskID.get(t_proxy['name'], t_proxy['cyclePoint'])\n",
" if options.json:\n",
" json_filter.update({task_id: t_proxy})\n",
" else:\n",
" if multi:\n",
" print(f'----\\nTASK ID: {task_id}')\n",
" prereqs = []\n",
" for item in t_proxy['prerequisites']:\n",
" prefix = ''\n",
" multi_cond = len(item['conditions']) > 1\n",
" if multi_cond:\n",
" prereqs.append([\n",
" True,\n",
" '',\n",
" item['expression'].replace('c', ''),\n",
" item['satisfied']\n",
" ])\n",
" for cond in item['conditions']:\n",
" if multi_cond and not options.list_prereqs:\n",
" prefix = f'\\t{cond[\"exprAlias\"].strip(\"c\")} = '\n",
" _, _, point, name = cond['taskId'].split(ID_DELIM)\n",
" cond_id = TaskID.get(name, point)\n",
" prereqs.append([\n",
" False,\n",
" prefix,\n",
" f'{cond_id} {cond[\"reqState\"]}',\n",
" cond['satisfied']\n",
" ])\n",
" if options.list_prereqs:\n",
" for composite, _, msg, _ in prereqs:\n",
" if not composite:\n",
" print(msg)\n",
" else:\n",
" flat_meta = flatten_data(t_proxy['task']['meta'])\n",
" for key, value in sorted(flat_meta.items(), reverse=True):\n",
" ansiprint(\n",
" f'<bold>{key}:</bold>'\n",
" f' {value or \"<m>(not given)</m>\"}')\n",
" ansiprint(\n",
" '\\n<bold>prerequisites</bold>'\n",
" ' (<red>- => not satisfied</red>):')\n",
" if not prereqs:\n",
" print(' (None)')\n",
" for _, prefix, msg, state in prereqs:\n",
" print_msg_state(f'{prefix}{msg}', state)\n",
"\n",
" ansiprint(\n",
" '\\n<bold>outputs</bold>'\n",
" ' (<red>- => not completed</red>):')\n",
" if not t_proxy['outputs']:\n",
" print(' (None)')\n",
" for key, val in t_proxy['outputs'].items():\n",
" print_msg_state(f'{task_id} {key}', val)\n",
" if t_proxy['extras']:\n",
" print('\\nother:')\n",
" for key, value in t_proxy['extras'].items():\n",
" print(' o %s ... %s' % (key, value))\n",
" if not results['taskProxies']:\n",
" ansiprint(\n",
" f\"<red>No matching tasks found: {task_ids}\",\n",
" file=sys.stderr)\n",
" sys.exit(1)\n",
"\n",
" if options.json:\n",
" print(json.dumps(json_filter, indent=4))\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 276 | 0 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import subprocess
# Matches the output lines from the 'ldd' tool. For example:
# libcrypto.so.10 => /path/to/usr/lib64/libcrypto.so.10 (0x00007fb0cb0a5000)
#
# Note: The following pattern will not match the following two types of
# dependencies and so they will not be included in the output from this module:
#
# 1. The dynamic linker:
# /lib64/ld-linux-x86-64.so.2 (0x00007f6f7ab79000)
# 2. Linux virtual dynamic shared objects:
# linux-vdso.so.1 (0x00007ffc06cfb000)
#
LDD_RE = re.compile(r'^\s+.+? => (\S+) \(0x.+\)')
class DependencyExtractor(object):
"""
This class extracts native library dependencies from the given executable.
"""
def __init__(self):
self.deps_cache = {}
self.lib_allowed_filter = lambda path: True
self.enable_expand_symlinks = False
def set_library_filter(self, lib_allowed_filter):
"""
Specify a filter predicate that should return True iff the specified
library path should be included in the result from extract_deps().
By default, all libraries are included in the result.
"""
self.lib_allowed_filter = lib_allowed_filter
def set_expand_symlinks(self, expand):
"""
Specify whether symlinks should be expanded in the output from
extract_deps(). By default, symlinks are not expanded. See
expand_symlinks().
"""
self.enable_expand_symlinks = expand
def expand_symlinks(self, deps):
"""
ldd will often point to symlinks. Return a list including any symlink in
the specified dependency list as well as whatever it's pointing to,
recursively.
"""
expanded = []
for path in deps:
expanded.append(path)
while os.path.islink(path):
# TODO(mpercy): os.readlink() can return an absolute path. Should we more carefully handle
# the path concatenation here?
path = os.path.join(os.path.dirname(path), os.readlink(path))
expanded.append(path)
return expanded
def extract_deps(self, exe):
"""
Runs 'ldd' on the provided 'exe' path, returning a list of
any libraries it depends on. Blacklisted libraries are
removed from this list.
If the provided 'exe' is not a binary executable, returns
an empty list.
"""
if (exe.endswith(".jar") or
exe.endswith(".pl") or
exe.endswith(".py") or
exe.endswith(".sh") or
exe.endswith(".txt") or
os.path.isdir(exe)):
return []
if exe not in self.deps_cache:
p = subprocess.Popen(["ldd", exe], stdout=subprocess.PIPE)
out, err = p.communicate()
self.deps_cache[exe] = (out, err, p.returncode)
out, err, rc = self.deps_cache[exe]
if rc != 0:
logging.warning("failed to run ldd on %s", exe)
return []
deps = []
for line in out.splitlines():
match = LDD_RE.match(line)
if not match:
continue
dep = match.group(1)
# Apply the provided predicate.
if not self.lib_allowed_filter(dep):
continue
deps.append(dep)
if self.enable_expand_symlinks:
deps = self.expand_symlinks(deps)
return deps
| [
"#!/usr/bin/env python\n",
"#\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n",
"# or more contributor license agreements. See the NOTICE file\n",
"# distributed with this work for additional information\n",
"# regarding copyright ownership. The ASF licenses this file\n",
"# to you under the Apache License, Version 2.0 (the\n",
"# \"License\"); you may not use this file except in compliance\n",
"# with the License. You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing,\n",
"# software distributed under the License is distributed on an\n",
"# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n",
"# KIND, either express or implied. See the License for the\n",
"# specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"import logging\n",
"import os\n",
"import re\n",
"import subprocess\n",
"\n",
"# Matches the output lines from the 'ldd' tool. For example:\n",
"# libcrypto.so.10 => /path/to/usr/lib64/libcrypto.so.10 (0x00007fb0cb0a5000)\n",
"#\n",
"# Note: The following pattern will not match the following two types of\n",
"# dependencies and so they will not be included in the output from this module:\n",
"#\n",
"# 1. The dynamic linker:\n",
"# /lib64/ld-linux-x86-64.so.2 (0x00007f6f7ab79000)\n",
"# 2. Linux virtual dynamic shared objects:\n",
"# linux-vdso.so.1 (0x00007ffc06cfb000)\n",
"#\n",
"LDD_RE = re.compile(r'^\\s+.+? => (\\S+) \\(0x.+\\)')\n",
"\n",
"class DependencyExtractor(object):\n",
" \"\"\"\n",
" This class extracts native library dependencies from the given executable.\n",
" \"\"\"\n",
" def __init__(self):\n",
" self.deps_cache = {}\n",
" self.lib_allowed_filter = lambda path: True\n",
" self.enable_expand_symlinks = False\n",
"\n",
" def set_library_filter(self, lib_allowed_filter):\n",
" \"\"\"\n",
" Specify a filter predicate that should return True iff the specified\n",
" library path should be included in the result from extract_deps().\n",
" By default, all libraries are included in the result.\n",
" \"\"\"\n",
" self.lib_allowed_filter = lib_allowed_filter\n",
"\n",
" def set_expand_symlinks(self, expand):\n",
" \"\"\"\n",
" Specify whether symlinks should be expanded in the output from\n",
" extract_deps(). By default, symlinks are not expanded. See\n",
" expand_symlinks().\n",
" \"\"\"\n",
" self.enable_expand_symlinks = expand\n",
"\n",
" def expand_symlinks(self, deps):\n",
" \"\"\"\n",
" ldd will often point to symlinks. Return a list including any symlink in\n",
" the specified dependency list as well as whatever it's pointing to,\n",
" recursively.\n",
" \"\"\"\n",
" expanded = []\n",
" for path in deps:\n",
" expanded.append(path)\n",
" while os.path.islink(path):\n",
" # TODO(mpercy): os.readlink() can return an absolute path. Should we more carefully handle\n",
" # the path concatenation here?\n",
" path = os.path.join(os.path.dirname(path), os.readlink(path))\n",
" expanded.append(path)\n",
" return expanded\n",
"\n",
" def extract_deps(self, exe):\n",
" \"\"\"\n",
" Runs 'ldd' on the provided 'exe' path, returning a list of\n",
" any libraries it depends on. Blacklisted libraries are\n",
" removed from this list.\n",
"\n",
" If the provided 'exe' is not a binary executable, returns\n",
" an empty list.\n",
" \"\"\"\n",
" if (exe.endswith(\".jar\") or\n",
" exe.endswith(\".pl\") or\n",
" exe.endswith(\".py\") or\n",
" exe.endswith(\".sh\") or\n",
" exe.endswith(\".txt\") or\n",
" os.path.isdir(exe)):\n",
" return []\n",
"\n",
" if exe not in self.deps_cache:\n",
" p = subprocess.Popen([\"ldd\", exe], stdout=subprocess.PIPE)\n",
" out, err = p.communicate()\n",
" self.deps_cache[exe] = (out, err, p.returncode)\n",
"\n",
" out, err, rc = self.deps_cache[exe]\n",
" if rc != 0:\n",
" logging.warning(\"failed to run ldd on %s\", exe)\n",
" return []\n",
"\n",
" deps = []\n",
" for line in out.splitlines():\n",
" match = LDD_RE.match(line)\n",
" if not match:\n",
" continue\n",
" dep = match.group(1)\n",
" # Apply the provided predicate.\n",
" if not self.lib_allowed_filter(dep):\n",
" continue\n",
" deps.append(dep)\n",
"\n",
" if self.enable_expand_symlinks:\n",
" deps = self.expand_symlinks(deps)\n",
" return deps\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0.16666666666666666,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0.029411764705882353,
0.010101010101010102,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.0625,
0,
0,
0.015384615384615385,
0.030303030303030304,
0.018518518518518517,
0,
0,
0,
0.018518518518518517,
0.0625,
0,
0,
0,
0.030303030303030304,
0.05,
0,
0.037037037037037035,
0.02631578947368421,
0.023255813953488372,
0,
0.043478260869565216,
0,
0,
0.025,
0
] | 119 | 0.007546 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Copyright 2019 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_log_forwarding_profile
short_description: Manage log forwarding profiles.
description:
- Manages log forwarding profiles.
author: "Garfield Lee Freeman (@shinmog)"
version_added: "2.8"
requirements:
- pan-python
- pandevice >= 0.11.1
notes:
- Panorama is supported.
- Check mode is supported.
extends_documentation_fragment:
- panos.transitional_provider
- panos.vsys_shared
- panos.device_group
options:
name:
description:
- Name of the profile.
required: true
description:
description:
- Profile description
enhanced_logging:
description:
- Valid for PAN-OS 8.1+
- Enabling enhanced application logging.
type: 'bool'
'''
EXAMPLES = '''
# Create a profile
- name: Create log forwarding profile
panos_log_forwarding_profile:
provider: '{{ provider }}'
name: 'my-profile'
enhanced_logging: true
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.panos.panos import get_connection
try:
from pandevice.objects import LogForwardingProfile
from pandevice.errors import PanDeviceError
except ImportError:
pass
def main():
helper = get_connection(
vsys_shared=True,
device_group=True,
with_state=True,
with_classic_provider_spec=True,
min_pandevice_version=(0, 11, 1),
min_panos_version=(8, 0, 0),
argument_spec=dict(
name=dict(required=True),
description=dict(),
enhanced_logging=dict(type='bool'),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=True,
required_one_of=helper.required_one_of,
)
# Verify imports, build pandevice object tree.
parent = helper.get_pandevice_parent(module)
try:
listing = LogForwardingProfile.refreshall(parent)
except PanDeviceError as e:
module.fail_json(msg='Failed refresh: {0}'.format(e))
spec = {
'name': module.params['name'],
'description': module.params['description'],
'enhanced_logging': module.params['enhanced_logging'],
}
obj = LogForwardingProfile(**spec)
parent.add(obj)
changed = helper.apply_state(obj, listing, module)
module.exit_json(changed=changed, msg='Done')
if __name__ == '__main__':
main()
| [
"#!/usr/bin/env python\n",
"# -*- coding: utf-8 -*-\n",
"\n",
"from __future__ import absolute_import, division, print_function\n",
"__metaclass__ = type\n",
"\n",
"# Copyright 2019 Palo Alto Networks, Inc\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"ANSIBLE_METADATA = {'metadata_version': '1.1',\n",
" 'status': ['preview'],\n",
" 'supported_by': 'community'}\n",
"\n",
"DOCUMENTATION = '''\n",
"---\n",
"module: panos_log_forwarding_profile\n",
"short_description: Manage log forwarding profiles.\n",
"description:\n",
" - Manages log forwarding profiles.\n",
"author: \"Garfield Lee Freeman (@shinmog)\"\n",
"version_added: \"2.8\"\n",
"requirements:\n",
" - pan-python\n",
" - pandevice >= 0.11.1\n",
"notes:\n",
" - Panorama is supported.\n",
" - Check mode is supported.\n",
"extends_documentation_fragment:\n",
" - panos.transitional_provider\n",
" - panos.vsys_shared\n",
" - panos.device_group\n",
"options:\n",
" name:\n",
" description:\n",
" - Name of the profile.\n",
" required: true\n",
" description:\n",
" description:\n",
" - Profile description\n",
" enhanced_logging:\n",
" description:\n",
" - Valid for PAN-OS 8.1+\n",
" - Enabling enhanced application logging.\n",
" type: 'bool'\n",
"'''\n",
"\n",
"EXAMPLES = '''\n",
"# Create a profile\n",
"- name: Create log forwarding profile\n",
" panos_log_forwarding_profile:\n",
" provider: '{{ provider }}'\n",
" name: 'my-profile'\n",
" enhanced_logging: true\n",
"'''\n",
"\n",
"RETURN = '''\n",
"# Default return values\n",
"'''\n",
"\n",
"from ansible.module_utils.basic import AnsibleModule\n",
"from ansible.module_utils.network.panos.panos import get_connection\n",
"\n",
"\n",
"try:\n",
" from pandevice.objects import LogForwardingProfile\n",
" from pandevice.errors import PanDeviceError\n",
"except ImportError:\n",
" pass\n",
"\n",
"\n",
"def main():\n",
" helper = get_connection(\n",
" vsys_shared=True,\n",
" device_group=True,\n",
" with_state=True,\n",
" with_classic_provider_spec=True,\n",
" min_pandevice_version=(0, 11, 1),\n",
" min_panos_version=(8, 0, 0),\n",
" argument_spec=dict(\n",
" name=dict(required=True),\n",
" description=dict(),\n",
" enhanced_logging=dict(type='bool'),\n",
" ),\n",
" )\n",
" module = AnsibleModule(\n",
" argument_spec=helper.argument_spec,\n",
" supports_check_mode=True,\n",
" required_one_of=helper.required_one_of,\n",
" )\n",
"\n",
" # Verify imports, build pandevice object tree.\n",
" parent = helper.get_pandevice_parent(module)\n",
"\n",
" try:\n",
" listing = LogForwardingProfile.refreshall(parent)\n",
" except PanDeviceError as e:\n",
" module.fail_json(msg='Failed refresh: {0}'.format(e))\n",
"\n",
" spec = {\n",
" 'name': module.params['name'],\n",
" 'description': module.params['description'],\n",
" 'enhanced_logging': module.params['enhanced_logging'],\n",
" }\n",
" obj = LogForwardingProfile(**spec)\n",
" parent.add(obj)\n",
"\n",
" changed = helper.apply_state(obj, listing, module)\n",
" module.exit_json(changed=changed, msg='Done')\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0.014705882352941176,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 123 | 0.000273 |
# Copyright 2015, Yahoo Inc.
# Licensed under the terms of the Apache License, Version 2.0. See the LICENSE file associated with the project for terms.
from pyspark.context import SparkContext
import numpy as np
import base64
import cPickle as pkl
from tempfile import NamedTemporaryFile
import os
import subprocess
from operator import add
def default_data_loading(sc, data_path, sampling_ratio, seed):
"""
This function loads training data from a text file, sampling it by the provided
ratio and random seed, and interprets each line as a tab-separated (id, data) pair
where 'data' is assumed to be a base64-encoded pickled numpy array. The ids are discarded.
The data is returned as an RDD of numpy arrays.
"""
# Compute the number of cores in our cluster - used below to heuristically set the number of partitions
# TypeError: int() argument must be a string or a number, not 'NoneType' ?
print sc._conf.toDebugString()
#print sc._conf.getAll()
try:
nb_instances = int(sc._conf.get('spark.executor.instances'))
except Exception as inst:
print "[default_data_loading: error] {}. Setting nb_instances to 2.".format(inst)
nb_instances = 2
try:
nb_executor_cores = int(sc._conf.get('spark.executor.cores'))
except Exception as inst:
print "[default_data_loading: error] {}. Setting nb_executor_cores to 2.".format(inst)
nb_executor_cores = 2
total_cores = nb_instances * nb_executor_cores
# Load and sample down the dataset
d = sc.textFile(data_path, total_cores * 3).sample(False, sampling_ratio, seed)
# The data is (id, vector) tab-delimited pairs where each vector is
# a base64-encoded pickled numpy array
deserialize_vec = lambda s: pkl.loads(base64.decodestring(s.split('\t')[1]))
vecs = d.map(deserialize_vec)
return vecs
def main(sc, args, data_load_fn=default_data_loading):
def seqOp(a, b):
a += np.outer(b, b)
return a
def combOp(a, b):
a += b
return a
# Load data
d = data_load_fn(sc, args.data, args.sampling_ratio, args.seed)
d.cache()
# Determine the data dimension
D = len(d.first())
print "d.first: {}, D: {}".format(d.first(),D)
# Count data points
count = d.count()
mu = d.aggregate(np.zeros(D), add, add)
mu = mu / float(count)
# Compute covariance estimator
summed_covar = d.treeAggregate(np.zeros((D, D)), seqOp, combOp, depth=args.agg_depth)
A = summed_covar / (count - 1) - np.outer(mu, mu)
E, P = np.linalg.eigh(A)
params = {
'mu': mu, # mean
'P': P, # PCA matrix
'E': E, # eigenvalues
'A': A, # covariance matrix
'c': count # sample size
}
save_hdfs_pickle(params, args.output)
def save_hdfs_pickle(m, pkl_path):
"""
Given a python object and a path on hdfs, save the object as a pickle file locally and copy the file
to the hdfs path.
"""
print 'Saving pickle to temp file...'
f = NamedTemporaryFile(delete=False)
pkl.dump(m, f, -1)
f.close()
print 'Copying pickle file to hdfs...'
copy_to_hdfs(f, pkl_path)
os.remove(f.name)
def copy_to_hdfs(f, hdfs_path):
subprocess.call(['hadoop', 'fs', '-copyFromLocal', f.name, hdfs_path])
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
# Data handling parameters
parser.add_argument('--data', dest='data', type=str, required=True, help='hdfs path to input data')
parser.add_argument('--data_udf', dest='data_udf', type=str, default=None, help='module name from which to load a data loading UDF')
parser.add_argument('--seed', dest='seed', type=int, default=None, help='optional random seed')
parser.add_argument('--sampling_ratio', dest='sampling_ratio', type=float, default=1.0, help='proportion of data to sample for training')
parser.add_argument('--agg_depth', dest='agg_depth', type=int, default=4, help='depth of tree aggregation to compute covariance estimator')
parser.add_argument('--output', dest='output', type=str, default=None, help='hdfs path to output pickle file of parameters')
args = parser.parse_args()
sc = SparkContext(appName='PCA')
# Load UDF module if provided
if args.data_udf:
sc.addPyFile('hdfs://memex/user/skaraman/build-lopq-index/lopq/spark/memex_udf.py')
sc.addPyFile('hdfs://memex/user/skaraman/build-lopq-index/lopq/spark/deepsentibanktf_udf.py')
udf_module = __import__(args.data_udf, fromlist=['udf'])
load_udf = udf_module.udf
main(sc, args, data_load_fn=load_udf)
else:
main(sc, args)
sc.stop()
| [
"# Copyright 2015, Yahoo Inc.\n",
"# Licensed under the terms of the Apache License, Version 2.0. See the LICENSE file associated with the project for terms.\n",
"from pyspark.context import SparkContext\n",
"\n",
"import numpy as np\n",
"import base64\n",
"import cPickle as pkl\n",
"from tempfile import NamedTemporaryFile\n",
"import os\n",
"import subprocess\n",
"from operator import add\n",
"\n",
"\n",
"def default_data_loading(sc, data_path, sampling_ratio, seed):\n",
" \"\"\"\n",
" This function loads training data from a text file, sampling it by the provided\n",
" ratio and random seed, and interprets each line as a tab-separated (id, data) pair\n",
" where 'data' is assumed to be a base64-encoded pickled numpy array. The ids are discarded.\n",
" The data is returned as an RDD of numpy arrays.\n",
" \"\"\"\n",
" # Compute the number of cores in our cluster - used below to heuristically set the number of partitions\n",
" # TypeError: int() argument must be a string or a number, not 'NoneType' ?\n",
" print sc._conf.toDebugString()\n",
" #print sc._conf.getAll()\n",
" try:\n",
" nb_instances = int(sc._conf.get('spark.executor.instances'))\n",
" except Exception as inst:\n",
" print \"[default_data_loading: error] {}. Setting nb_instances to 2.\".format(inst)\n",
" nb_instances = 2\n",
" try:\n",
" nb_executor_cores = int(sc._conf.get('spark.executor.cores'))\n",
" except Exception as inst:\n",
" print \"[default_data_loading: error] {}. Setting nb_executor_cores to 2.\".format(inst)\n",
" nb_executor_cores = 2\n",
"\n",
"\n",
" total_cores = nb_instances * nb_executor_cores\n",
"\n",
" # Load and sample down the dataset\n",
" d = sc.textFile(data_path, total_cores * 3).sample(False, sampling_ratio, seed)\n",
"\n",
" # The data is (id, vector) tab-delimited pairs where each vector is\n",
" # a base64-encoded pickled numpy array\n",
" deserialize_vec = lambda s: pkl.loads(base64.decodestring(s.split('\\t')[1]))\n",
" vecs = d.map(deserialize_vec)\n",
"\n",
" return vecs\n",
"\n",
"\n",
"def main(sc, args, data_load_fn=default_data_loading):\n",
"\n",
" def seqOp(a, b):\n",
" a += np.outer(b, b)\n",
" return a\n",
"\n",
" def combOp(a, b):\n",
" a += b\n",
" return a\n",
"\n",
" # Load data\n",
" d = data_load_fn(sc, args.data, args.sampling_ratio, args.seed)\n",
" d.cache()\n",
"\n",
" # Determine the data dimension\n",
" D = len(d.first())\n",
" print \"d.first: {}, D: {}\".format(d.first(),D)\n",
"\n",
" # Count data points\n",
" count = d.count()\n",
" mu = d.aggregate(np.zeros(D), add, add)\n",
" mu = mu / float(count)\n",
"\n",
" # Compute covariance estimator\n",
" summed_covar = d.treeAggregate(np.zeros((D, D)), seqOp, combOp, depth=args.agg_depth)\n",
"\n",
" A = summed_covar / (count - 1) - np.outer(mu, mu)\n",
" E, P = np.linalg.eigh(A)\n",
"\n",
" params = {\n",
" 'mu': mu, # mean\n",
" 'P': P, # PCA matrix\n",
" 'E': E, # eigenvalues\n",
" 'A': A, # covariance matrix\n",
" 'c': count # sample size\n",
" }\n",
"\n",
" save_hdfs_pickle(params, args.output)\n",
"\n",
"\n",
"def save_hdfs_pickle(m, pkl_path):\n",
" \"\"\"\n",
" Given a python object and a path on hdfs, save the object as a pickle file locally and copy the file\n",
" to the hdfs path.\n",
" \"\"\"\n",
" print 'Saving pickle to temp file...'\n",
" f = NamedTemporaryFile(delete=False)\n",
" pkl.dump(m, f, -1)\n",
" f.close()\n",
"\n",
" print 'Copying pickle file to hdfs...'\n",
" copy_to_hdfs(f, pkl_path)\n",
" os.remove(f.name)\n",
"\n",
"\n",
"def copy_to_hdfs(f, hdfs_path):\n",
" subprocess.call(['hadoop', 'fs', '-copyFromLocal', f.name, hdfs_path])\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" from argparse import ArgumentParser\n",
" parser = ArgumentParser()\n",
"\n",
" # Data handling parameters\n",
" parser.add_argument('--data', dest='data', type=str, required=True, help='hdfs path to input data')\n",
" parser.add_argument('--data_udf', dest='data_udf', type=str, default=None, help='module name from which to load a data loading UDF')\n",
" parser.add_argument('--seed', dest='seed', type=int, default=None, help='optional random seed')\n",
" parser.add_argument('--sampling_ratio', dest='sampling_ratio', type=float, default=1.0, help='proportion of data to sample for training')\n",
" parser.add_argument('--agg_depth', dest='agg_depth', type=int, default=4, help='depth of tree aggregation to compute covariance estimator')\n",
"\n",
" parser.add_argument('--output', dest='output', type=str, default=None, help='hdfs path to output pickle file of parameters')\n",
"\n",
" args = parser.parse_args()\n",
"\n",
" sc = SparkContext(appName='PCA')\n",
"\n",
" # Load UDF module if provided\n",
" if args.data_udf:\n",
" sc.addPyFile('hdfs://memex/user/skaraman/build-lopq-index/lopq/spark/memex_udf.py')\n",
" sc.addPyFile('hdfs://memex/user/skaraman/build-lopq-index/lopq/spark/deepsentibanktf_udf.py')\n",
" udf_module = __import__(args.data_udf, fromlist=['udf'])\n",
" load_udf = udf_module.udf\n",
" main(sc, args, data_load_fn=load_udf)\n",
" else:\n",
" main(sc, args)\n",
"\n",
" sc.stop()\n"
] | [
0,
0.008130081300813009,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.011494252873563218,
0.010526315789473684,
0,
0,
0.009259259259259259,
0,
0,
0.034482758620689655,
0,
0.014285714285714285,
0,
0.011111111111111112,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0.0196078431372549,
0,
0,
0.011904761904761904,
0,
0,
0,
0.024691358024691357,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009523809523809525,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0.0072992700729927005,
0.01,
0.007042253521126761,
0.006944444444444444,
0,
0.007751937984496124,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0
] | 136 | 0.002114 |
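# Hypothetical usage sketch for the PCA job above: load the parameter pickle it saves
# and project new vectors onto the top principal components. The local file name
# 'pca_params.pkl' and the component count are assumptions, not outputs of the job.
import cPickle as pkl
import numpy as np

with open('pca_params.pkl', 'rb') as f:
    params = pkl.load(f)

def pca_project(x, params, n_components=64):
    # numpy.linalg.eigh returns eigenvalues in ascending order, so the highest-variance
    # directions are the last columns of the rotation matrix 'P'.
    centered = x - params['mu']
    return np.dot(centered, params['P'][:, -n_components:])

projected = pca_project(np.random.rand(len(params['mu'])), params)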
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import logging
import os
from datetime import datetime
try:
import defusedxml.minidom as xml
except ImportError:
import xml.dom.minidom as xml
class instagram:
def __init__(self, cookie):
"""This sets up this class to communicate with Instagram.
Args:
cookie: A dictionary object with the required cookie values (ds_user_id, sessionid, csrftoken).
"""
self.userid = cookie["ds_user_id"]
self.sessionid = cookie["sessionid"]
self.csrftoken = cookie["csrftoken"]
self.mid = cookie["mid"]
self.headers = {
"accept" : "*/*",
"accept-encoding" : "gzip, deflate",
"accept-language" : "en-US",
"content_type" : "application/x-www-form-urlencoded; charset=UTF-8",
"cache-control" : "no-cache",
"cookie" : "ds_user_id=" + self.userid + "; sessionid=" + self.sessionid + "; csrftoken=" + self.csrftoken + "; mid=" + self.mid,
"dnt" : "1",
# "pragma" : "no-cache",
# "referer" : "https://www.instagram.com/",
"user-agent" : "Instagram 10.26.0 (iPhone7,2; iOS 10_1_1; en_US; en-US; scale=2.00; gamut=normal; 750x1334) AppleWebKit/420+",
"x-ig-capabilities": "36oD",
# "x-ig-connection-type" : "WIFI",
# "x-ig-fb-http-engine" : "Liger"
}
self.session = requests.Session()
max_tries = 3
backoff_factor = 0.2
status_forcelist = (500, 502, 503, 504)
retry = Retry(total=max_tries, read=max_tries, connect=max_tries, backoff_factor=backoff_factor, status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.headers = self.headers
def getReelTray(self):
"""Get reel tray from API.
Returns:
Response object with reel tray API response
"""
endpoint = "https://i.instagram.com/api/v1/feed/reels_tray/"
response = self.session.get(endpoint, timeout=60)
if response.status_code != requests.codes.ok:
logging.error("Status Code Error." + str(response.status_code))
response.raise_for_status()
return response
def getReelMedia(self, user):
"""Get reel media of a user from API.
Args:
user: User ID
Returns:
Response object with reel media API response
"""
endpoint = "https://i.instagram.com/api/v1/feed/user/" + str(user) + "/reel_media/"
response = self.session.get(endpoint, timeout=60)
if response.status_code != requests.codes.ok:
logging.error("Status Code Error." + str(response.status_code))
response.raise_for_status()
return response
def getStories(self):
return self.getReelTray()
def getUserStories(self, user):
return self.getReelMedia(user)
def getUserIDs(self, json: dict) -> list:
"""Extract user IDs from reel tray JSON.
Args:
json: Reel tray response from IG
Returns:
List of user IDs
"""
users = []
for user in json['tray']:
users.append(user['user']['pk'])
return users
def getFile(self, url: str, dest: str):
"""Download file and save to destination
Args:
url: URL of item to download
dest: File system destination to save item to
Returns:
None
"""
logging.debug("URL: %s", url)
logging.debug("Dest: %s", dest)
try:
if os.path.getsize(dest) == 0:
logging.info("Empty file exists. Removing.")
os.remove(dest)
except FileNotFoundError:
pass
try:
dirpath = os.path.dirname(dest)
os.makedirs(dirpath, exist_ok=True)
with open(dest, "xb") as handle:
response = self.session.get(url, stream=True, timeout=60)
if response.status_code != requests.codes.ok:
logging.error("Status Code Error." + str(response.status_code))
response.raise_for_status()
for data in response.iter_content(chunk_size=4194304):
handle.write(data)
handle.close()
except FileExistsError:
logging.info("File already exists.")
if os.path.getsize(dest) == 0:
logging.info("Error downloading. Removing.")
os.remove(dest)
def formatPath(self, user: str, pk: int, timestamp: int, postid: str, mediatype: int) -> str:
"""Format download path to a specific format/template
Args:
user: User name
pk: User ID
timestamp: UTC Unix timestamp
postid: Post ID
mediatype: Media type as defined by IG
Returns:
None
"""
dirpath = os.path.dirname(__file__)
utcdatetime = datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%d-%H-%M-%S")
if mediatype == 1:
ext = ".jpg"
type = "stories"
elif mediatype == 2:
ext = ".mp4"
type = "stories"
elif mediatype == 3:
ext = ".mp4"
type = "livestories"
else:
ext = ""
type = "other"
path = os.path.join(dirpath, "downloads", user + "_" + str(pk), type, utcdatetime + "_" + str(timestamp) + "_" + postid + ext)
return path
def downloadReel(self, resp):
"""Download stories of a followed user's tray.
Download the stories of a followed user.
Args:
resp: JSON dictionary of reel from IG API
Returns:
None
"""
try:
for index, item in enumerate(resp['items']):
logging.debug(' ' + str(index))
username = item['user']['username']
userpk = item['user']['pk']
timestamp = item['taken_at']
postid = item['id']
mediatype = item['media_type']
if mediatype == 2: # Video
largest = 0
for versionindex, video in enumerate(item['video_versions']):
itemsize = video['width'] * video['height']
largestsize = item['video_versions'][largest]['width'] * \
item['video_versions'][largest]['height']
if itemsize > largestsize:
largest = versionindex
logging.debug(' V' + str(largest))
url = item['video_versions'][largest]['url']
logging.debug(' ' + url)
elif mediatype == 1: # Image
largest = 0
for versionindex, image in enumerate(item['image_versions2']['candidates']):
itemsize = image['width'] * image['height']
largestsize = item['image_versions2']['candidates'][largest]['width'] * \
item['image_versions2']['candidates'][largest]['height']
if itemsize > largestsize:
largest = versionindex
logging.debug(' I' + str(largest))
url = item['image_versions2']['candidates'][largest]['url']
logging.debug(' ' + url)
else: # Unknown
logging.debug(' E')
url = None
pass
path = self.formatPath(username, userpk, timestamp, postid, mediatype)
self.getFile(url, path)
except KeyError: # JSON 'item' key does not exist for later items in tray as of 6/2/2017
pass
def downloadTray(self, resp):
"""Download stories of logged in user's tray.
Download the stories as available in the tray. The tray contains a list of
reels, a collection of the stories posted by a followed user.
The tray only contains a small set of reels of the first few users. To download
the rest, a reel must be obtained for each user in the tray.
Args:
resp: JSON dictionary of tray from IG API
Returns:
None
"""
for reel in resp['tray']:
self.downloadReel(reel)
def downloadStoryLive(self, resp):
"""Download post-live stories of a followed user's tray.
Download the post-live stories of a followed user.
Args:
resp: JSON dictionary of reel from IG API
Returns:
None
"""
try:
for index,item in enumerate(resp["post_live"]["post_live_items"]):
logging.debug(' ' + str(index))
username = item["user"]["username"]
userpk = item["user"]["pk"]
for bindex,broadcast in enumerate(item["broadcasts"]):
logging.debug(' ' + str(bindex))
timestamp = broadcast["published_time"]
postid = broadcast["media_id"]
dash = broadcast["dash_manifest"]
dashxml = xml.parseString(dash)
elements = dashxml.getElementsByTagName("BaseURL")
for eindex,element in enumerate(elements):
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
url = node.data
mediatype = 3
path = self.formatPath(username, userpk, timestamp, postid + "_" + str(eindex), mediatype)
self.getFile(url, path)
except KeyError: # No "post_live" key
logging.debug(' ' + 'No live stories.')
def close(self):
"""Close seesion to IG
Returns:
None
"""
self.session.close()
| [
"import requests\n",
"from requests.adapters import HTTPAdapter\n",
"from requests.packages.urllib3.util.retry import Retry\n",
"import logging\n",
"import os\n",
"from datetime import datetime\n",
"try:\n",
" import defusedxml.minidom as xml\n",
"except ImportError:\n",
" import xml.dom.minidom as xml\n",
"\n",
"class instagram:\n",
" def __init__(self, cookie):\n",
" \"\"\"This sets up this class to communicate with Instagram.\n",
"\n",
" Args:\n",
" cookie: A dictionary object with the required cookie values (ds_user_id, sessionid, csrftoken).\n",
" \"\"\"\n",
" self.userid = cookie[\"ds_user_id\"]\n",
" self.sessionid = cookie[\"sessionid\"]\n",
" self.csrftoken = cookie[\"csrftoken\"]\n",
" self.mid = cookie[\"mid\"]\n",
" self.headers = {\n",
" \"accept\" : \"*/*\",\n",
" \"accept-encoding\" : \"gzip, deflate\",\n",
" \"accept-language\" : \"en-US\",\n",
" \"content_type\" : \"application/x-www-form-urlencoded; charset=UTF-8\",\n",
" \"cache-control\" : \"no-cache\",\n",
" \"cookie\" : \"ds_user_id=\" + self.userid + \"; sessionid=\" + self.sessionid + \"; csrftoken=\" + self.csrftoken + \"; mid=\" + self.mid,\n",
" \"dnt\" : \"1\",\n",
" # \"pragma\" : \"no-cache\",\n",
" # \"referer\" : \"https://www.instagram.com/\",\n",
" \"user-agent\" : \"Instagram 10.26.0 (iPhone7,2; iOS 10_1_1; en_US; en-US; scale=2.00; gamut=normal; 750x1334) AppleWebKit/420+\",\n",
" \"x-ig-capabilities\": \"36oD\",\n",
" # \"x-ig-connection-type\" : \"WIFI\",\n",
" # \"x-ig-fb-http-engine\" : \"Liger\"\n",
" }\n",
" self.session = requests.Session()\n",
" max_tries = 3\n",
" backoff_factor = 0.2\n",
" status_forcelist = (500, 502, 503, 504)\n",
" retry = Retry(total=max_tries, read=max_tries, connect=max_tries, backoff_factor=backoff_factor, status_forcelist=status_forcelist)\n",
" adapter = HTTPAdapter(max_retries=retry)\n",
" self.session.mount('http://', adapter)\n",
" self.session.mount('https://', adapter)\n",
" self.session.headers = self.headers\n",
"\n",
" def getReelTray(self):\n",
" \"\"\"Get reel tray from API.\n",
"\n",
" Returns:\n",
" Response object with reel tray API response\n",
" \"\"\"\n",
" endpoint = \"https://i.instagram.com/api/v1/feed/reels_tray/\"\n",
" response = self.session.get(endpoint, timeout=60)\n",
" if response.status_code != requests.codes.ok:\n",
" logging.error(\"Status Code Error.\" + str(response.status_code))\n",
" response.raise_for_status()\n",
" return response\n",
"\n",
" def getReelMedia(self, user):\n",
" \"\"\"Get reel media of a user from API.\n",
"\n",
" Args:\n",
" user: User ID\n",
"\n",
" Returns:\n",
" Response object with reel media API response\n",
" \"\"\"\n",
" endpoint = \"https://i.instagram.com/api/v1/feed/user/\" + str(user) + \"/reel_media/\"\n",
" response = self.session.get(endpoint, timeout=60)\n",
" if response.status_code != requests.codes.ok:\n",
" logging.error(\"Status Code Error.\" + str(response.status_code))\n",
" response.raise_for_status()\n",
" return response\n",
"\n",
" def getStories(self):\n",
" return self.getReelTray()\n",
"\n",
" def getUserStories(self, user):\n",
" return self.getReelMedia(user)\n",
"\n",
" def getUserIDs(self, json: dict) -> list:\n",
" \"\"\"Extract user IDs from reel tray JSON.\n",
"\n",
" Args:\n",
" json: Reel tray response from IG\n",
"\n",
" Returns:\n",
" List of user IDs\n",
" \"\"\"\n",
" users = []\n",
" for user in json['tray']:\n",
" users.append(user['user']['pk'])\n",
" return users\n",
"\n",
" def getFile(self, url: str, dest: str):\n",
" \"\"\"Download file and save to destination\n",
"\n",
" Args:\n",
" url: URL of item to download\n",
" dest: File system destination to save item to\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" logging.debug(\"URL: %s\", url)\n",
" logging.debug(\"Dest: %s\", dest)\n",
" try:\n",
" if os.path.getsize(dest) == 0:\n",
" logging.info(\"Empty file exists. Removing.\")\n",
" os.remove(dest)\n",
" except FileNotFoundError:\n",
" pass\n",
"\n",
" try:\n",
" dirpath = os.path.dirname(dest)\n",
" os.makedirs(dirpath, exist_ok=True)\n",
" with open(dest, \"xb\") as handle:\n",
" response = self.session.get(url, stream=True, timeout=60)\n",
" if response.status_code != requests.codes.ok:\n",
" logging.error(\"Status Code Error.\" + str(response.status_code))\n",
" response.raise_for_status()\n",
" for data in response.iter_content(chunk_size=4194304):\n",
" handle.write(data)\n",
" handle.close()\n",
" except FileExistsError:\n",
" logging.info(\"File already exists.\")\n",
"\n",
" if os.path.getsize(dest) == 0:\n",
" logging.info(\"Error downloading. Removing.\")\n",
" os.remove(dest)\n",
"\n",
" def formatPath(self, user: str, pk: int, timestamp: int, postid: str, mediatype: int) -> str:\n",
" \"\"\"Format download path to a specific format/template\n",
"\n",
" Args:\n",
" user: User name\n",
" pk: User ID\n",
" timestamp: UTC Unix timestamp\n",
" postid: Post ID\n",
" mediatype: Media type as defined by IG\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" dirpath = os.path.dirname(__file__)\n",
" utcdatetime = datetime.utcfromtimestamp(timestamp).strftime(\"%Y-%m-%d-%H-%M-%S\")\n",
" if mediatype == 1:\n",
" ext = \".jpg\"\n",
" type = \"stories\"\n",
" elif mediatype == 2:\n",
" ext = \".mp4\"\n",
" type = \"stories\"\n",
" elif mediatype == 3:\n",
" ext = \".mp4\"\n",
" type = \"livestories\"\n",
" else:\n",
" ext = \"\"\n",
" type = \"other\"\n",
" path = os.path.join(dirpath, \"downloads\", user + \"_\" + str(pk), type, utcdatetime + \"_\" + str(timestamp) + \"_\" + postid + ext)\n",
" return path\n",
"\n",
" def downloadReel(self, resp):\n",
" \"\"\"Download stories of a followed user's tray.\n",
"\n",
" Download the stories of a followed user.\n",
"\n",
" Args:\n",
" resp: JSON dictionary of reel from IG API\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" try:\n",
" for index, item in enumerate(resp['items']):\n",
" logging.debug(' ' + str(index))\n",
" username = item['user']['username']\n",
" userpk = item['user']['pk']\n",
" timestamp = item['taken_at']\n",
" postid = item['id']\n",
" mediatype = item['media_type']\n",
" if mediatype == 2: # Video\n",
" largest = 0\n",
" for versionindex, video in enumerate(item['video_versions']):\n",
" itemsize = video['width'] * video['height']\n",
" largestsize = item['video_versions'][largest]['width'] * \\\n",
" item['video_versions'][largest]['height']\n",
" if itemsize > largestsize:\n",
" largest = versionindex\n",
" logging.debug(' V' + str(largest))\n",
" url = item['video_versions'][largest]['url']\n",
" logging.debug(' ' + url)\n",
" elif mediatype == 1: # Image\n",
" largest = 0\n",
" for versionindex, image in enumerate(item['image_versions2']['candidates']):\n",
" itemsize = image['width'] * image['height']\n",
" largestsize = item['image_versions2']['candidates'][largest]['width'] * \\\n",
" item['image_versions2']['candidates'][largest]['height']\n",
" if itemsize > largestsize:\n",
" largest = versionindex\n",
" logging.debug(' I' + str(largest))\n",
" url = item['image_versions2']['candidates'][largest]['url']\n",
" logging.debug(' ' + url)\n",
" else: # Unknown\n",
" logging.debug(' E')\n",
" url = None\n",
" pass\n",
"\n",
" path = self.formatPath(username, userpk, timestamp, postid, mediatype)\n",
" self.getFile(url, path)\n",
" except KeyError: # JSON 'item' key does not exist for later items in tray as of 6/2/2017\n",
" pass\n",
"\n",
" def downloadTray(self, resp):\n",
" \"\"\"Download stories of logged in user's tray.\n",
"\n",
" Download the stories as available in the tray. The tray contains a list of\n",
" reels, a collection of the stories posted by a followed user.\n",
"\n",
" The tray only contains a small set of reels of the first few users. To download\n",
" the rest, a reel must be obtained for each user in the tray.\n",
"\n",
" Args:\n",
" resp: JSON dictionary of tray from IG API\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" for reel in resp['tray']:\n",
" self.downloadReel(reel)\n",
"\n",
" def downloadStoryLive(self, resp):\n",
" \"\"\"Download post-live stories of a followed user's tray.\n",
"\n",
" Download the post-live stories of a followed user.\n",
"\n",
" Args:\n",
" resp: JSON dictionary of reel from IG API\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" try:\n",
" for index,item in enumerate(resp[\"post_live\"][\"post_live_items\"]):\n",
" logging.debug(' ' + str(index))\n",
" username = item[\"user\"][\"username\"]\n",
" userpk = item[\"user\"][\"pk\"]\n",
" for bindex,broadcast in enumerate(item[\"broadcasts\"]):\n",
" logging.debug(' ' + str(bindex))\n",
" timestamp = broadcast[\"published_time\"]\n",
" postid = broadcast[\"media_id\"]\n",
" dash = broadcast[\"dash_manifest\"]\n",
" dashxml = xml.parseString(dash)\n",
" elements = dashxml.getElementsByTagName(\"BaseURL\")\n",
" for eindex,element in enumerate(elements):\n",
" for node in element.childNodes:\n",
" if node.nodeType == node.TEXT_NODE:\n",
" url = node.data\n",
" mediatype = 3\n",
" path = self.formatPath(username, userpk, timestamp, postid + \"_\" + str(eindex), mediatype)\n",
" self.getFile(url, path)\n",
" except KeyError: # No \"post_live\" key\n",
" logging.debug(' ' + 'No live stories.')\n",
"\n",
" def close(self):\n",
" \"\"\"Close seesion to IG\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" self.session.close()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0.025,
0.02,
0.023809523809523808,
0.023529411764705882,
0.022222222222222223,
0.013157894736842105,
0.02631578947368421,
0,
0,
0.013793103448275862,
0,
0,
0,
0,
0,
0,
0,
0,
0.007142857142857143,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007407407407407408,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0.012195121951219513,
0,
0.012048192771084338,
0.0125,
0,
0,
0,
0,
0,
0.022222222222222223,
0,
0.010309278350515464,
0,
0.01020408163265306,
0.021052631578947368,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0.011494252873563218,
0,
0.020618556701030927,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012658227848101266,
0,
0,
0,
0.014084507042253521,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0.008130081300813009,
0,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 272 | 0.002124 |
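# Hypothetical usage sketch for the instagram class above; the cookie values are
# placeholders that would have to come from an authenticated browser session.
session_cookie = {"ds_user_id": "123", "sessionid": "abc", "csrftoken": "xyz", "mid": "mmm"}
ig = instagram(session_cookie)
tray = ig.getStories().json()      # reel tray for the logged-in account
ig.downloadTray(tray)              # stories already present in the tray
for user_id in ig.getUserIDs(tray):
    reel = ig.getUserStories(user_id).json()
    ig.downloadReel(reel)          # remaining stories, one reel per followed user
ig.downloadStoryLive(tray)         # post-live replays, if any
ig.close()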
from airflow.hooks.base_hook import BaseHook
from airflow import configuration
try:
snakebite_imported = True
from snakebite.client import Client, HAClient, Namenode
except ImportError:
snakebite_imported = False
from airflow.utils import AirflowException
class HDFSHookException(AirflowException):
pass
class HDFSHook(BaseHook):
'''
Interact with HDFS. This class is a wrapper around the snakebite library.
'''
def __init__(self, hdfs_conn_id='hdfs_default', proxy_user=None):
if not snakebite_imported:
raise ImportError(
'This HDFSHook implementation requires snakebite, but '
'snakebite is not compatible with Python 3 '
'(as of August 2015). Please use Python 2 if you require '
'this hook -- or help by submitting a PR!')
self.hdfs_conn_id = hdfs_conn_id
self.proxy_user = proxy_user
def get_conn(self):
'''
Returns a snakebite HDFSClient object.
'''
use_sasl = False
if configuration.get('core', 'security') == 'kerberos':
use_sasl = True
connections = self.get_connections(self.hdfs_conn_id)
client = None
        ''' When using HAClient, proxy_user must be the same, so it is ok to always take the first '''
        effective_user = self.proxy_user or connections[0].login
if len(connections) == 1:
client = Client(connections[0].host, connections[0].port, use_sasl=use_sasl, effective_user=effective_user)
elif len(connections) > 1:
nn = [Namenode(conn.host, conn.port) for conn in connections]
client = HAClient(nn, use_sasl=use_sasl, effective_user=effective_user)
else:
raise HDFSHookException("conn_id doesn't exist in the repository")
return client
| [
"from airflow.hooks.base_hook import BaseHook\n",
"from airflow import configuration\n",
"\n",
"try:\n",
" snakebite_imported = True\n",
" from snakebite.client import Client, HAClient, Namenode\n",
"except ImportError:\n",
" snakebite_imported = False\n",
"\n",
"from airflow.utils import AirflowException\n",
"\n",
"\n",
"class HDFSHookException(AirflowException):\n",
" pass\n",
"\n",
"\n",
"class HDFSHook(BaseHook):\n",
" '''\n",
" Interact with HDFS. This class is a wrapper around the snakebite library.\n",
" '''\n",
" def __init__(self, hdfs_conn_id='hdfs_default', proxy_user=None):\n",
" if not snakebite_imported:\n",
" raise ImportError(\n",
" 'This HDFSHook implementation requires snakebite, but '\n",
" 'snakebite is not compatible with Python 3 '\n",
" '(as of August 2015). Please use Python 2 if you require '\n",
" 'this hook -- or help by submitting a PR!')\n",
" self.hdfs_conn_id = hdfs_conn_id\n",
" self.proxy_user = proxy_user\n",
"\n",
" def get_conn(self):\n",
" '''\n",
" Returns a snakebite HDFSClient object.\n",
" '''\n",
" use_sasl = False\n",
" if configuration.get('core', 'security') == 'kerberos':\n",
" use_sasl = True\n",
"\n",
" connections = self.get_connections(self.hdfs_conn_id)\n",
" client = None\n",
"\t''' When using HAClient, proxy_user must be the same, so is ok to always take the first '''\n",
"\teffective_user = self.proxy_user or connections[0].login\n",
" if len(connections) == 1:\n",
" client = Client(connections[0].host, connections[0].port, use_sasl=use_sasl, effective_user=effective_user)\n",
" elif len(connections) > 1:\n",
" nn = [Namenode(conn.host, conn.port) for conn in connections]\n",
" client = HAClient(nn, use_sasl=use_sasl, effective_user=effective_user)\n",
" else:\n",
" raise HDFSHookException(\"conn_id doesn't exist in the repository\")\n",
" return client\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0.017241379310344827,
0.029411764705882353,
0.008333333333333333,
0,
0,
0.011904761904761904,
0,
0,
0
] | 50 | 0.001983 |
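# Hypothetical usage sketch for the hook above (Python 2 only, since it depends on
# snakebite); 'hdfs_default' must exist as a connection in the Airflow metadata DB,
# and the listed path is an assumption.
hook = HDFSHook(hdfs_conn_id='hdfs_default')
client = hook.get_conn()
for entry in client.ls(['/user/airflow']):
    print(entry['path'])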
import json
import csv
"""
def write_to_text(data, filename):
f = open(filename, 'w', encoding = 'utf-8')
for line in data:
f.write(line + '\n')
f.close()
"""
def read_text_file(filename):
ignore = []
ignore.append('[')
ignore.append(']')
ignore.append('')
f = open(filename, encoding='utf-8')
table_original = []
for line in f:
line = line.rstrip('\n')
if line not in ignore:
if 'Getting snapshot pages' not in line:
table_original.append(line.rstrip(','))
return table_original
def write_to_csv(data, filename):
with open(filename, 'w') as csvfile:
w = csv.writer(csvfile, delimiter = ',')
for row in data:
try:
w.writerow(row)
except:
print(row, 'not written to file.')
print(filename, 'created.')
def extract_url_database(filename):
u = read_text_file(filename)
url_list = [['url', 'timestamp', 'id']]
for article in u:
url_object = []
url_object.append(json.loads(article)['file_url'])
url_object.append(json.loads(article)['timestamp'])
url_object.append(json.loads(article)['file_id'])
url_list.append(url_object)
return(url_list)
# write extracted URL data to a CSV file, one row per URL
write_to_csv(extract_url_database('whg.txt'), 'whg_url_data.csv')
| [
"import json\n",
"import csv\n",
"\n",
"\"\"\"\n",
"def write_to_text(data, filename):\n",
" f = open(filename, 'w', encoding = 'utf-8')\n",
" for line in data:\n",
" f.write(line + '\\n')\n",
" f.close()\n",
"\"\"\"\n",
"\n",
"def read_text_file(filename):\n",
" ignore = []\n",
" ignore.append('[')\n",
" ignore.append(']')\n",
" ignore.append('')\n",
" f = open(filename, encoding='utf-8')\n",
" table_original = []\n",
" for line in f:\n",
" line = line.rstrip('\\n')\n",
" if line not in ignore:\n",
" if 'Getting snapshot pages' not in line:\n",
" table_original.append(line.rstrip(','))\n",
" return table_original\n",
"\n",
"def write_to_csv(data, filename):\n",
" with open(filename, 'w') as csvfile:\n",
" w = csv.writer(csvfile, delimiter = ',')\n",
" for row in data:\n",
" try:\n",
" w.writerow(row)\n",
" except:\n",
" print(row, 'not written to file.')\n",
" print(filename, 'created.')\n",
"\n",
"def extract_url_database(filename):\n",
" u = read_text_file(filename)\n",
" url_list = [['url', 'timestamp', 'id']]\n",
" for article in u:\n",
" url_object = []\n",
" url_object.append(json.loads(article)['file_url'])\n",
" url_object.append(json.loads(article)['timestamp'])\n",
" url_object.append(json.loads(article)['file_id'])\n",
" url_list.append(url_object)\n",
" return(url_list)\n",
"\n",
"# write list to text file, one URL per line\n",
"write_to_csv(extract_url_database('whg.txt'), 'whg_url_data.csv')\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0.04081632653061224,
0,
0,
0,
0.05,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015151515151515152,
1
] | 49 | 0.024418 |
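# Hypothetical shape of one input line that extract_url_database() above expects in
# 'whg.txt'; the keys come from the code, the values here are made up.
example_line = '{"file_url": "http://example.org/a.pdf", "timestamp": "20150101000000", "file_id": "abc123"},'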
#!/usr/bin/python -u
from optparse import OptionParser, OptionGroup
from sys import argv as sys_argv, exit
from swiftclient import Client
def upload(options):
values = {
'username': options.os_username,
'password': options.os_password,
'auth_url': options.os_auth_url,
'project_name': options.os_project_name,
'user_domain_name': options.os_user_domain_name,
'project_domain_name': options.os_project_domain_name,
'storage_url': options.os_storage_url,
}
container = options.container
path = options.upload_path
cli = Client(values)
cli.upload(container, path)
def download(options):
values = {
'username': options.os_username,
'password': options.os_password,
'auth_url': options.os_auth_url,
'project_name': options.os_project_name,
'user_domain_name': options.os_user_domain_name,
'project_domain_name': options.os_project_domain_name,
'storage_url': options.os_storage_url,
}
container = options.container
objectname = options.object
download_path = options.download_path
cli = Client(values)
cli.download(container, objectname, download_path)
def main(arguments=None):
if arguments:
argv = arguments
else:
argv = sys_argv
version = '0.0.1'
parser = OptionParser(version='%%prog %s' % version,
usage='''
Command-line interface to the OpenStack Swift API.
usage: %%prog [--version] [--help]
Mandatory Switch:
[--os-username <auth-user-name>]
[--os-password <auth-password>]
[--os-project-name <auth-project-name>]
[--os-auth-url <auth-url>]
[--os-user-domain-name <auth-user-domain-name>]
[--os-project-domain-name <auth-project-domain-name>]
[--os-storage-url <storage-url>]
[--operation-type <operation-type>]
[--container <container-name>]
Command Specific Switch:
For Upload (Uploads files or directories to the given container from the upload path.):
[--upload-path <upload-path>]
For Download (Downloads files from the given container in the download path.):
[--object <object-name>]
[--download-path <download-path>]
'''.strip('\n') % globals())
parser.add_option('--insecure',
action="store_true", dest="insecure",
default=True,
help='Allow swiftclient to access servers without '
'having to verify the SSL certificate. '
'Defaults to env[SWIFTCLIENT_INSECURE] '
'(set to \'true\' to enable).')
os_grp = OptionGroup(parser, "OpenStack authentication options")
os_grp.add_option('--os-username',
metavar='<auth-user-name>',
help='OpenStack username required to authenticate with OpenStack swift. ')
os_grp.add_option('--os_username',
help='OpenStack username required to authenticate with OpenStack swift. ')
os_grp.add_option('--os-password',
metavar='<auth-password>',
help='OpenStack password required to authenticate with OpenStack swift.')
os_grp.add_option('--os-user-domain-name',
metavar='<user-domain-name>',
help='OpenStack user domain name required to connect with OpenStack swift.')
os_grp.add_option('--os-project-name',
metavar='<project-name>',
help='OpenStack project name required to connect with OpenStack swift.')
os_grp.add_option('--os-project-domain-name',
metavar='<project-domain-name>',
help='OpenStack project domain name required to connect with OpenStack swift.')
os_grp.add_option('--os-auth-url',
metavar='<auth-url>',
help='OpenStack auth URL required to authenticate with OpenStack Identity to get the '
'authentication token.')
os_grp.add_option('--os-storage-url',
metavar='<storage-url>',
help='OpenStack storage URL required to connect with the OpenStack Swift.')
os_grp.add_option('--operation-type',
metavar='<operation-type>',
help='Specified OpenStack swift related operation which can be upload or download.')
os_grp.add_option('--container',
metavar='<container-name>',
help='Specified container name to upload/download object.')
os_grp.add_option('--object',
metavar='<object-name>',
help='Specified object name to be downloaded in the downloaded path.')
os_grp.add_option('--upload-path',
metavar='<upload-path>',
help='Upload path of the file or directory.')
os_grp.add_option('--download-path',
metavar='<download-path>',
help='Download path to download the object.')
(options, args) = parser.parse_args(argv[1:])
try:
if(options.operation_type == 'upload'):
if(options.upload_path is None):
parser.print_help()
exit()
else:
upload(options)
elif(options.operation_type == 'download'):
if (options.object is None and options.download_path is None):
parser.print_help()
exit()
else:
download(options)
else:
parser.print_help()
exit()
except Exception as err:
print(str(err))
if __name__ == '__main__':
main()
| [
"#!/usr/bin/python -u\n",
"\n",
"from optparse import OptionParser, OptionGroup\n",
"from sys import argv as sys_argv, exit\n",
"\n",
"from swiftclient import Client\n",
"\n",
"def upload(options):\n",
"\n",
" values = {\n",
" 'username': options.os_username,\n",
" 'password': options.os_password,\n",
" 'auth_url': options.os_auth_url,\n",
" 'project_name': options.os_project_name,\n",
" 'user_domain_name': options.os_user_domain_name,\n",
" 'project_domain_name': options.os_project_domain_name,\n",
" 'storage_url': options.os_storage_url,\n",
" }\n",
"\n",
" container = options.container\n",
" path = options.upload_path\n",
"\n",
" cli = Client(values)\n",
" cli.upload(container, path)\n",
"\n",
"def download(options):\n",
"\n",
" values = {\n",
" 'username': options.os_username,\n",
" 'password': options.os_password,\n",
" 'auth_url': options.os_auth_url,\n",
" 'project_name': options.os_project_name,\n",
" 'user_domain_name': options.os_user_domain_name,\n",
" 'project_domain_name': options.os_project_domain_name,\n",
" 'storage_url': options.os_storage_url,\n",
" }\n",
"\n",
" container = options.container\n",
" objectname = options.object\n",
" download_path = options.download_path\n",
"\n",
" cli = Client(values)\n",
" cli.download(container, objectname, download_path)\n",
"\n",
"def main(arguments=None):\n",
" if arguments:\n",
" argv = arguments\n",
" else:\n",
" argv = sys_argv\n",
"\n",
" version = '0.0.1'\n",
"\n",
" parser = OptionParser(version='%%prog %s' % version,\n",
" usage='''\n",
"Command-line interface to the OpenStack Swift API.\n",
"\n",
"usage: %%prog [--version] [--help]\n",
"\n",
"Mandatory Switch: \n",
" [--os-username <auth-user-name>]\n",
" [--os-password <auth-password>]\n",
" [--os-project-name <auth-project-name>]\n",
" [--os-auth-url <auth-url>]\n",
" [--os-user-domain-name <auth-user-domain-name>]\n",
" [--os-project-domain-name <auth-project-domain-name>]\n",
" [--os-storage-url <storage-url>]\n",
" [--operation-type <operation-type>]\n",
" [--container <container-name>]\n",
"\n",
"Command Specific Switch:\n",
"\n",
"For Upload (Uploads files or directories to the given container from the upload path.):\n",
" [--upload-path <upload-path>]\n",
"\n",
"For Download (Downloads files from the given container in the download path.):\n",
" [--object <object-name>]\n",
" [--download-path <download-path>] \n",
"'''.strip('\\n') % globals())\n",
" parser.add_option('--insecure',\n",
" action=\"store_true\", dest=\"insecure\",\n",
" default=True,\n",
" help='Allow swiftclient to access servers without '\n",
" 'having to verify the SSL certificate. '\n",
" 'Defaults to env[SWIFTCLIENT_INSECURE] '\n",
" '(set to \\'true\\' to enable).')\n",
"\n",
" os_grp = OptionGroup(parser, \"OpenStack authentication options\")\n",
"\n",
" os_grp.add_option('--os-username',\n",
" metavar='<auth-user-name>',\n",
" help='OpenStack username required to authenticate with OpenStack swift. ')\n",
" os_grp.add_option('--os_username',\n",
" help='OpenStack username required to authenticate with OpenStack swift. ')\n",
" os_grp.add_option('--os-password',\n",
" metavar='<auth-password>',\n",
" help='OpenStack password required to authenticate with OpenStack swift.')\n",
" os_grp.add_option('--os-user-domain-name',\n",
" metavar='<user-domain-name>',\n",
" help='OpenStack user domain name required to connect with OpenStack swift.')\n",
" os_grp.add_option('--os-project-name',\n",
" metavar='<project-name>',\n",
" help='OpenStack project name required to connect with OpenStack swift.')\n",
" os_grp.add_option('--os-project-domain-name',\n",
" metavar='<project-domain-name>',\n",
" help='OpenStack project domain name required to connect with OpenStack swift.')\n",
" os_grp.add_option('--os-auth-url',\n",
" metavar='<auth-url>',\n",
" help='OpenStack auth URL required to authenticate with OpenStack Identity to get the '\n",
" 'authentication token.')\n",
" os_grp.add_option('--os-storage-url',\n",
" metavar='<storage-url>',\n",
" help='OpenStack storage URL required to connect with the OpenStack Swift.')\n",
" os_grp.add_option('--operation-type',\n",
" metavar='<operation-type>',\n",
" help='Specified OpenStack swift related operation which can be upload or download.')\n",
" os_grp.add_option('--container',\n",
" metavar='<container-name>',\n",
" help='Specified container name to upload/download object.')\n",
" os_grp.add_option('--object',\n",
" metavar='<object-name>',\n",
" help='Specified object name to be downloaded in the downloaded path.')\n",
" os_grp.add_option('--upload-path',\n",
" metavar='<upload-path>',\n",
" help='Upload path of the file or directory.')\n",
" os_grp.add_option('--download-path',\n",
" metavar='<download-path>',\n",
" help='Download path to download the object.')\n",
"\n",
" (options, args) = parser.parse_args(argv[1:])\n",
"\n",
" try:\n",
" if(options.operation_type == 'upload'):\n",
" if(options.upload_path is None):\n",
" parser.print_help()\n",
" exit()\n",
" else:\n",
" upload(options)\n",
" elif(options.operation_type == 'download'):\n",
" if (options.object is None and options.download_path is None):\n",
" parser.print_help()\n",
" exit()\n",
" else:\n",
" download(options)\n",
" else:\n",
" parser.print_help()\n",
" exit()\n",
"\n",
" except Exception as err:\n",
" print(str(err))\n",
"\n",
"if __name__ == '__main__':\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0.010309278350515464,
0,
0,
0.010416666666666666,
0,
0,
0.010101010101010102,
0,
0,
0.010526315789473684,
0,
0,
0.00980392156862745,
0,
0,
0.009174311926605505,
0,
0,
0,
0.020202020202020204,
0,
0,
0.009345794392523364,
0,
0,
0.012195121951219513,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0
] | 152 | 0.002443 |
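# Hypothetical invocation of the command-line tool above; every value below is a
# placeholder and the endpoints do not refer to a real deployment.
main(['swift_cli.py',
      '--os-username', 'demo',
      '--os-password', 'secret',
      '--os-project-name', 'demo',
      '--os-user-domain-name', 'Default',
      '--os-project-domain-name', 'Default',
      '--os-auth-url', 'http://controller:5000/v3',
      '--os-storage-url', 'http://controller:8080/v1/AUTH_demo',
      '--operation-type', 'upload',
      '--container', 'backups',
      '--upload-path', '/tmp/report.txt'])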
# Copyright (c) 2015 Huawei, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import paste.urlmap
from congress.api import application
from congress.api import versions
def wsgi_app():
mapper = paste.urlmap.URLMap()
mapper['/'] = versions.Versions()
api_resource_mgr = application.ResourceManager()
api_resource_mgr.register_handler(versions.VersionV1Handler(r'/v1[/]?'))
app = application.ApiApplication(api_resource_mgr)
mapper['/v1'] = app
return mapper
| [
"# Copyright (c) 2015 Huawei, Inc. All rights reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"#\n",
"\n",
"import paste.urlmap\n",
"\n",
"from congress.api import application\n",
"from congress.api import versions\n",
"\n",
"\n",
"def wsgi_app():\n",
"\n",
" mapper = paste.urlmap.URLMap()\n",
" mapper['/'] = versions.Versions()\n",
"\n",
" api_resource_mgr = application.ResourceManager()\n",
" api_resource_mgr.register_handler(versions.VersionV1Handler(r'/v1[/]?'))\n",
" app = application.ApiApplication(api_resource_mgr)\n",
" mapper['/v1'] = app\n",
"\n",
" return mapper\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 32 | 0 |
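# Hypothetical local test for the factory above: paste.urlmap.URLMap is itself a
# WSGI application, so it can be served directly; host and port are assumptions.
from wsgiref.simple_server import make_server
make_server('127.0.0.1', 8080, wsgi_app()).serve_forever()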
from __future__ import print_function
from ase.db.table import dict2forces, hill
from ase.data import atomic_masses, chemical_symbols
from ase.db.core import float_to_time_string, now
import numpy as np
class Summary:
def __init__(self, dct, subscript=None):
self.dct = dct
self.cell = [['{0:.3f}'.format(a) for a in axis] for axis in dct.cell]
forces = dict2forces(dct)
if forces is None:
fmax = None
self.forces = None
else:
fmax = (forces**2).sum(1).max()**0.5
N = len(forces)
self.forces = []
for n, f in enumerate(forces):
if n < 5 or n >= N - 5:
f = tuple('{0:10.3f}'.format(x) for x in f)
symbol = chemical_symbols[dct.numbers[n]]
self.forces.append((n, symbol) + f)
elif n == 5:
self.forces.append((' ...', '',
' ...',
' ...',
' ...'))
self.stress = dct.get('stress')
if self.stress is not None:
self.stress = ', '.join('{0:.3f}'.format(s) for s in self.stress)
if 'masses' in dct:
mass = dct.masses.sum()
else:
mass = atomic_masses[dct.numbers].sum()
formula = hill(dct.numbers)
if subscript:
formula = subscript.sub(r'<sub>\1</sub>', formula)
table = [
('id', dct.id),
('age', float_to_time_string(now() - dct.ctime, True)),
('formula', formula),
('user', dct.user),
('calculator', dct.get('calculator')),
('energy [eV]', dct.get('energy')),
('fmax [eV/Ang]', fmax),
('charge [|e|]', dct.get('charge')),
('mass [au]', mass),
('unique id', dct.unique_id),
('volume [Ang^3]', abs(np.linalg.det(dct.cell)))]
self.table = [(name, value) for name, value in table
if value is not None]
if 'key_value_pairs' in dct:
self.key_value_pairs = sorted(dct.key_value_pairs.items())
else:
self.key_value_pairs = None
if 'keywords' in dct:
self.keywords = ', '.join(sorted(dct.keywords))
else:
self.keywords = None
self.dipole = dct.get('dipole')
if self.dipole is not None:
self.dipole = ', '.join('{0:.3f}'.format(d) for d in self.dipole)
self.data = dct.get('data')
if self.data:
self.data = ', '.join(self.data.keys())
self.constraints = dct.get('constraints')
if self.constraints:
self.constraints = ', '.join(d['name'] for d in self.constraints)
def write(self):
dct = self.dct
width = max(len(name) for name, value in self.table)
for name, value in self.table:
print('{0:{width}}|{1}'.format(name, value, width=width))
print('\nUnit cell in Ang:')
print('axis|periodic| x| y| z')
c = 1
for p, axis in zip(dct.pbc, self.cell):
print(' {0}| {1}|{2[0]:>11}|{2[1]:>11}|{2[2]:>11}'.format(
c, [' no', 'yes'][p], axis))
c += 1
if self.key_value_pairs:
print('\nKey-value pairs:')
width = max(len(key) for key, value in self.key_value_pairs)
for key, value in self.key_value_pairs:
print('{0:{width}}|{1}'.format(key, value, width=width))
if self.keywords:
print('\nKeywords:', self.keywords)
if self.forces:
            print('\nForces in eV/Ang:')
for f in self.forces:
print('{0:4}|{1:2}|{2}|{3}|{4}'.format(*f))
if self.stress:
print('\nStress tensor (xx, yy, zz, zy, zx, yx) in eV/Ang^3:')
print(' ', self.stress)
if self.dipole:
print('\nDipole moment in e*Ang: ({0})'.format(self.dipole))
if self.constraints:
print('\nConstraints:', self.constraints)
if self.data:
print('\nData:', self.data)
| [
"from __future__ import print_function\n",
"\n",
"from ase.db.table import dict2forces, hill\n",
"from ase.data import atomic_masses, chemical_symbols\n",
"from ase.db.core import float_to_time_string, now\n",
"\n",
"import numpy as np\n",
"\n",
"\n",
"class Summary:\n",
" def __init__(self, dct, subscript=None):\n",
" self.dct = dct\n",
" \n",
" self.cell = [['{0:.3f}'.format(a) for a in axis] for axis in dct.cell]\n",
" \n",
" forces = dict2forces(dct)\n",
" if forces is None:\n",
" fmax = None\n",
" self.forces = None\n",
" else:\n",
" fmax = (forces**2).sum(1).max()**0.5\n",
" N = len(forces)\n",
" self.forces = []\n",
" for n, f in enumerate(forces):\n",
" if n < 5 or n >= N - 5:\n",
" f = tuple('{0:10.3f}'.format(x) for x in f)\n",
" symbol = chemical_symbols[dct.numbers[n]]\n",
" self.forces.append((n, symbol) + f)\n",
" elif n == 5:\n",
" self.forces.append((' ...', '',\n",
" ' ...',\n",
" ' ...',\n",
" ' ...'))\n",
" \n",
" self.stress = dct.get('stress')\n",
" if self.stress is not None:\n",
" self.stress = ', '.join('{0:.3f}'.format(s) for s in self.stress)\n",
" \n",
" if 'masses' in dct:\n",
" mass = dct.masses.sum()\n",
" else:\n",
" mass = atomic_masses[dct.numbers].sum()\n",
" \n",
" formula = hill(dct.numbers)\n",
" if subscript:\n",
" formula = subscript.sub(r'<sub>\\1</sub>', formula)\n",
" \n",
" table = [\n",
" ('id', dct.id),\n",
" ('age', float_to_time_string(now() - dct.ctime, True)),\n",
" ('formula', formula),\n",
" ('user', dct.user),\n",
" ('calculator', dct.get('calculator')),\n",
" ('energy [eV]', dct.get('energy')),\n",
" ('fmax [eV/Ang]', fmax),\n",
" ('charge [|e|]', dct.get('charge')),\n",
" ('mass [au]', mass),\n",
" ('unique id', dct.unique_id),\n",
" ('volume [Ang^3]', abs(np.linalg.det(dct.cell)))]\n",
" self.table = [(name, value) for name, value in table\n",
" if value is not None]\n",
"\n",
" if 'key_value_pairs' in dct:\n",
" self.key_value_pairs = sorted(dct.key_value_pairs.items())\n",
" else:\n",
" self.key_value_pairs = None\n",
"\n",
" if 'keywords' in dct:\n",
" self.keywords = ', '.join(sorted(dct.keywords))\n",
" else:\n",
" self.keywords = None\n",
" \n",
" self.dipole = dct.get('dipole')\n",
" if self.dipole is not None:\n",
" self.dipole = ', '.join('{0:.3f}'.format(d) for d in self.dipole)\n",
" \n",
" self.data = dct.get('data')\n",
" if self.data:\n",
" self.data = ', '.join(self.data.keys())\n",
" \n",
" self.constraints = dct.get('constraints')\n",
" if self.constraints:\n",
" self.constraints = ', '.join(d['name'] for d in self.constraints)\n",
" \n",
" def write(self):\n",
" dct = self.dct\n",
" \n",
" width = max(len(name) for name, value in self.table)\n",
" for name, value in self.table:\n",
" print('{0:{width}}|{1}'.format(name, value, width=width))\n",
"\n",
" print('\\nUnit cell in Ang:')\n",
" print('axis|periodic| x| y| z')\n",
" c = 1\n",
" for p, axis in zip(dct.pbc, self.cell):\n",
" print(' {0}| {1}|{2[0]:>11}|{2[1]:>11}|{2[2]:>11}'.format(\n",
" c, [' no', 'yes'][p], axis))\n",
" c += 1\n",
" \n",
" if self.key_value_pairs:\n",
" print('\\nKey-value pairs:')\n",
" width = max(len(key) for key, value in self.key_value_pairs)\n",
" for key, value in self.key_value_pairs:\n",
" print('{0:{width}}|{1}'.format(key, value, width=width))\n",
" \n",
" if self.keywords:\n",
" print('\\nKeywords:', self.keywords)\n",
" \n",
" if self.forces:\n",
" print('\\nForces in ev/Ang:')\n",
" for f in self.forces:\n",
" print('{0:4}|{1:2}|{2}|{3}|{4}'.format(*f))\n",
"\n",
" if self.stress:\n",
" print('\\nStress tensor (xx, yy, zz, zy, zx, yx) in eV/Ang^3:')\n",
" print(' ', self.stress)\n",
"\n",
" if self.dipole:\n",
" print('\\nDipole moment in e*Ang: ({0})'.format(self.dipole))\n",
" \n",
" if self.constraints:\n",
" print('\\nConstraints:', self.constraints)\n",
" \n",
" if self.data:\n",
" print('\\nData:', self.data)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.07692307692307693,
0,
0
] | 125 | 0.010963 |
import sys
from metatlas import metatlas_objects as metob
import metatlas.helpers.metatlas_get_data_helper_fun as ma_data
import os
from IPython.display import display
import matplotlib.pyplot as plt
try:
import ipywidgets as widgets
except ImportError:
from IPython.html import widgets
try:
import traitlets
except ImportError:
from IPython.utils import traitlets
from ipywidgets import interact, interactive, fixed, FloatSlider
import copy
data = []
groups = []
file_names = []
compound_names = []
compound_objects = []
files_idx = dict()
compound_idx = dict()
groups_idx = dict()
# one select for the compound
wcompounds = widgets.Select(
description="compounds",
options=[]
)
# have a multiple select for the files
wfiles = widgets.SelectMultiple(
description="files",
options=[]
)
wfname = widgets.Text(
description='Atlas Name',
value='myAtlas',
)
all_files = widgets.Checkbox(
description='Select All Files',
value=False,
)
plot_button = widgets.Button(description='Plot me')
create_atlas_btn = widgets.Button(description="Create Atlas")
rtmin_widget = FloatSlider()
rtpeak_widget = FloatSlider()
rtmax_widget = FloatSlider()
###########################################################################
###
def plot_intensity(cval, fvals, rt_min, rt_max, rt_peak):
for i in range(len(fvals)):
d = data[files_idx[fvals[i]]][compound_idx[cval]]
if len(d['data']['eic']['rt']) > 0:
x = d['data']['eic']['rt']
y = d['data']['eic']['intensity']
plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)
plt.axvline(rt_min, color='b', linewidth=2.0)
plt.axvline(rt_max, color='g', linewidth=2.0)
plt.axvline(rt_peak, color='r', linewidth=2.0)
###########################################################################
###
def create_atlas(b):
identifications = list()
file_names = wfiles.value
compound_name = wcompounds.value
idx2 = compound_idx[compound_name]
atlas = metob.Atlas()
atlas.name = wfname.value
# create an empty rt reference
rt_ref = metob.RtReference()
rt_ref.rt_min = rtmin_widget.value
rt_ref.rt_max = rtmax_widget.value
rt_ref.rt_peak = rtpeak_widget.value
rt_ref.rt_units = data[0][idx2]['identification'].rt_references[0].rt_units
# create an empty mz_reference
mz_ref = metob.MzReference()
mz_ref.mz = data[0][idx2]['identification'].mz_references[0].mz
mz_ref.mz_tolerance = data[0][idx2]['identification'].mz_references[0].mz_tolerance
mz_ref.mz_tolerance_units = data[0][idx2]['identification'].mz_references[0].mz_tolerance_units
mz_ref.detected_polarity = data[0][idx2]['identification'].mz_references[0].detected_polarity
identification = metob.CompoundIdentification()
identification.compoud = compound_name
identification.name = compound_name
identification.rt_references = [rt_ref]
identification.mz_references = [mz_ref]
identifications.append(identification)
atlas.compound_identifications = identifications
#metob.store(atlas)
def select_files(b):
all_files.value = not all_files.value
###########################################################################
##
def plot_button_clicked(b):
plt.cla()
plt.clf()
plt.close()
fvals = list(wfiles.value)
cval = wcompounds.value
global rtmin_widget, rtmax_widget, rtpeak_widget
min_x = list()
max_x = list()
if len(fvals) == 1 and fvals[0] == 'all':
fvals = file_names
elif len(fvals) > 1 and 'all' in fvals:
fvals.remove('all')
#if all_files.value == True:
# fvals = file_names
#else:
# fvals = wfiles.value
for i in range(len(fvals)):
d = data[files_idx[fvals[i]]][compound_idx[cval]]
rt_min = d['identification'].rt_references[0].rt_min
rt_max = d['identification'].rt_references[0].rt_max
rt_peak = d['identification'].rt_references[0].rt_peak
if len(d['data']['eic']['rt']) > 0:
x = d['data']['eic']['rt']
y = d['data']['eic']['intensity']
min_x.append(min(x))
max_x.append(max(x))
plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)
plt.axvline(rt_min, color='b', linewidth=2.0)
plt.axvline(rt_max, color='g', linewidth=2.0)
plt.axvline(rt_peak, color='r', linewidth=2.0)
rtmin_widget.close()
rtpeak_widget.close()
rtmax_widget.close()
rtmin_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_min, color='blue')
rtpeak_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_peak, color='red')
rtmax_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_max, color='green')
interact(plot_intensity,
cval=fixed(cval),
fvals=fixed(fvals),
rt_min=rtmin_widget,
rt_peak=rtpeak_widget,
rt_max=rtmax_widget)
def dill2atlas(fname):
global data, groups, file_names, compound_names, compound_objects, files_idx, compound_idx, groups_idx
data = ma_data.get_dill_data(fname)
groups = ma_data.get_group_names(data)
file_names = ma_data.get_file_names(data)
(compound_names, compound_objects) = ma_data.get_compound_names(data)
files_idx = dict()
for f_idx, f_name in enumerate(file_names):
files_idx[f_name] = f_idx
compound_idx = dict()
for cpd_idx, cpd_name in enumerate(compound_names):
compound_idx[cpd_name] = cpd_idx
groups_idx = dict()
for grp_idx, grp_name in enumerate(groups):
groups_idx[grp_name] = grp_idx
wcompounds.options=compound_names
wfiles.options= ['all'] + file_names
display(widgets.HBox((wfname, create_atlas_btn)))
display(widgets.HBox((wcompounds, wfiles)))
display(plot_button)
plot_button.on_click(plot_button_clicked)
create_atlas_btn.on_click(create_atlas)
all_files.observe(select_files)
| [
"import sys\n",
"\n",
"from metatlas import metatlas_objects as metob\n",
"import metatlas.helpers.metatlas_get_data_helper_fun as ma_data\n",
"import os\n",
"\n",
"\n",
"from IPython.display import display\n",
"import matplotlib.pyplot as plt\n",
"try:\n",
" import ipywidgets as widgets\n",
"except ImportError:\n",
" from IPython.html import widgets\n",
"try:\n",
" import traitlets\n",
"except ImportError:\n",
" from IPython.utils import traitlets\n",
"\n",
"from ipywidgets import interact, interactive, fixed, FloatSlider\n",
"\n",
"\n",
"import copy\n",
"\n",
"\n",
"\n",
"data = []\n",
"groups = []\n",
"file_names = []\n",
"compound_names = []\n",
"compound_objects = []\n",
"\n",
"files_idx = dict()\n",
"compound_idx = dict()\n",
"groups_idx = dict()\n",
"\n",
"\n",
"\n",
"# one select for the compound\n",
"wcompounds = widgets.Select(\n",
" description=\"compounds\",\n",
" options=[]\n",
")\n",
"\n",
"# have a multiple select for the files\n",
"wfiles = widgets.SelectMultiple(\n",
" description=\"files\",\n",
" options=[]\n",
")\n",
"\n",
"wfname = widgets.Text(\n",
" description='Atlas Name',\n",
" value='myAtlas',\n",
")\n",
"\n",
"all_files = widgets.Checkbox(\n",
" description='Select All Files',\n",
" value=False,\n",
")\n",
"\n",
"plot_button = widgets.Button(description='Plot me')\n",
"create_atlas_btn = widgets.Button(description=\"Create Atlas\")\n",
"\n",
"rtmin_widget = FloatSlider()\n",
"rtpeak_widget = FloatSlider()\n",
"rtmax_widget = FloatSlider()\n",
"\n",
"\n",
"###########################################################################\n",
"###\n",
"def plot_intensity(cval, fvals, rt_min, rt_max, rt_peak):\n",
" for i in range(len(fvals)):\n",
" d = data[files_idx[fvals[i]]][compound_idx[cval]]\n",
"\n",
" if len(d['data']['eic']['rt']) > 0:\n",
" x = d['data']['eic']['rt']\n",
" y = d['data']['eic']['intensity']\n",
" plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)\n",
"\n",
" plt.axvline(rt_min, color='b', linewidth=2.0)\n",
" plt.axvline(rt_max, color='g', linewidth=2.0)\n",
" plt.axvline(rt_peak, color='r', linewidth=2.0)\n",
"\n",
"\n",
"###########################################################################\n",
"###\n",
"def create_atlas(b):\n",
" identifications = list()\n",
" file_names = wfiles.value\n",
" compound_name = wcompounds.value\n",
" idx2 = compound_idx[compound_name]\n",
"\n",
" atlas = metob.Atlas()\n",
" atlas.name = wfname.value\n",
"\n",
" # create an empty rt reference\n",
" rt_ref = metob.RtReference()\n",
"\n",
" rt_ref.rt_min = rtmin_widget.value\n",
" rt_ref.rt_max = rtmax_widget.value\n",
" rt_ref.rt_peak = rtpeak_widget.value\n",
" rt_ref.rt_units = data[0][idx2]['identification'].rt_references[0].rt_units\n",
"\n",
" # create an empty mz_reference\n",
" mz_ref = metob.MzReference()\n",
"\n",
" mz_ref.mz = data[0][idx2]['identification'].mz_references[0].mz\n",
" mz_ref.mz_tolerance = data[0][idx2]['identification'].mz_references[0].mz_tolerance\n",
" mz_ref.mz_tolerance_units = data[0][idx2]['identification'].mz_references[0].mz_tolerance_units\n",
" mz_ref.detected_polarity = data[0][idx2]['identification'].mz_references[0].detected_polarity\n",
"\n",
" identification = metob.CompoundIdentification()\n",
" identification.compoud = compound_name\n",
" identification.name = compound_name\n",
" identification.rt_references = [rt_ref]\n",
" identification.mz_references = [mz_ref]\n",
"\n",
" identifications.append(identification)\n",
"\n",
" atlas.compound_identifications = identifications\n",
" #metob.store(atlas)\n",
"\n",
"\n",
"def select_files(b):\n",
" all_files.value = not all_files.value\n",
"\n",
"\n",
"###########################################################################\n",
"##\n",
"def plot_button_clicked(b):\n",
" plt.cla()\n",
" plt.clf()\n",
" plt.close()\n",
"\n",
" fvals = list(wfiles.value)\n",
" cval = wcompounds.value\n",
" global rtmin_widget, rtmax_widget, rtpeak_widget\n",
"\n",
" min_x = list()\n",
" max_x = list()\n",
"\n",
" if len(fvals) == 1 and fvals[0] == 'all':\n",
" fvals = file_names\n",
" elif len(fvals) > 1 and 'all' in fvals:\n",
" fvals.remove('all')\n",
"\n",
"\n",
"\n",
"\n",
"\n",
" #if all_files.value == True:\n",
" # fvals = file_names\n",
" #else:\n",
" # fvals = wfiles.value\n",
"\n",
" for i in range(len(fvals)):\n",
" d = data[files_idx[fvals[i]]][compound_idx[cval]]\n",
" rt_min = d['identification'].rt_references[0].rt_min\n",
" rt_max = d['identification'].rt_references[0].rt_max\n",
" rt_peak = d['identification'].rt_references[0].rt_peak\n",
"\n",
" if len(d['data']['eic']['rt']) > 0:\n",
" x = d['data']['eic']['rt']\n",
" y = d['data']['eic']['intensity']\n",
" min_x.append(min(x))\n",
" max_x.append(max(x))\n",
" plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)\n",
"\n",
" plt.axvline(rt_min, color='b', linewidth=2.0)\n",
" plt.axvline(rt_max, color='g', linewidth=2.0)\n",
" plt.axvline(rt_peak, color='r', linewidth=2.0)\n",
"\n",
" rtmin_widget.close()\n",
" rtpeak_widget.close()\n",
" rtmax_widget.close()\n",
" rtmin_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_min, color='blue')\n",
" rtpeak_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_peak, color='red')\n",
" rtmax_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_max, color='green')\n",
" interact(plot_intensity,\n",
" cval=fixed(cval),\n",
" fvals=fixed(fvals),\n",
" rt_min=rtmin_widget,\n",
" rt_peak=rtpeak_widget,\n",
" rt_max=rtmax_widget)\n",
"\n",
"\n",
"def dill2atlas(fname):\n",
" global data, groups, file_names, compound_names, compound_objects, files_idx, compound_idx, groups_idx\n",
"\n",
" data = ma_data.get_dill_data(fname)\n",
" groups = ma_data.get_group_names(data)\n",
" file_names = ma_data.get_file_names(data)\n",
" (compound_names, compound_objects) = ma_data.get_compound_names(data)\n",
"\n",
" files_idx = dict()\n",
" for f_idx, f_name in enumerate(file_names):\n",
" files_idx[f_name] = f_idx\n",
"\n",
" compound_idx = dict()\n",
" for cpd_idx, cpd_name in enumerate(compound_names):\n",
" compound_idx[cpd_name] = cpd_idx\n",
"\n",
" groups_idx = dict()\n",
" for grp_idx, grp_name in enumerate(groups):\n",
" groups_idx[grp_name] = grp_idx\n",
"\n",
"\n",
" wcompounds.options=compound_names\n",
"\n",
" wfiles.options= ['all'] + file_names\n",
"\n",
"\n",
"\n",
" display(widgets.HBox((wfname, create_atlas_btn)))\n",
" display(widgets.HBox((wcompounds, wfiles)))\n",
" display(plot_button)\n",
"\n",
"\n",
" plot_button.on_click(plot_button_clicked)\n",
" create_atlas_btn.on_click(create_atlas)\n",
" all_files.observe(select_files)\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0.01,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06060606060606061,
0,
0.09090909090909091,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00980392156862745,
0.009708737864077669,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0.024390243902439025,
0,
0,
0,
0.018518518518518517,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
1
] | 221 | 0.00685 |
#!/usr/bin/env python
'''
Plots the gravitational potential of the earth and moon
assumes the earth is at 0,0 and moon is on x-axis.
Inspired by http://www.wired.com/2014/07/contour-plots-with-python-and-plotly/?mbid=social_twitter
and: https://gist.github.com/rhettallain/1aa12b44d59562ce08fc
Practicing plotly
'''
__author__ = 'julenka'
import numpy as np
import plotly.plotly as py
import math
from plotly.graph_objs import *
massEarth = 5.9729e24
massMoon = 7.3477e22
distanceFromEarthToMoon = 384400 # units is km
G = 6.67384e-11
earthOffset = 100000
earthLocation = [earthOffset,earthOffset]
moonLocation = [earthOffset + distanceFromEarthToMoon/2,
earthOffset + math.sqrt(math.pow(distanceFromEarthToMoon,2) - math.pow(distanceFromEarthToMoon/2, 2))]
# make mesh
step = 10000
x = np.arange(1, 2 * distanceFromEarthToMoon, step)
y = np.arange(1, 2 * distanceFromEarthToMoon, step)
X,Y = np.meshgrid(x,y)
# gravitational potential values
V = X * 0
Vmax = 4e10
for r in range(len(X)):
for c in range(len(Y)):
currentLocation = np.array([X[r,c], Y[r,c]])
distanceToEarth = np.linalg.norm(np.subtract(currentLocation, earthLocation))
gpFromEarth = G * massEarth / distanceToEarth #V(r) = Gm/r
distanceToMoon = np.linalg.norm(np.subtract(currentLocation, moonLocation))
gpFromMoon = G * massMoon / distanceToMoon #V(r) = Gm/r
totalGp = max(0, min(Vmax, gpFromEarth + gpFromMoon))
V[r,c] = totalGp
data=[{'x':x, 'y':y, 'z':V, 'type':'contour'}]
plot_url = py.plot(data, filename='earth-moon gravitational potential') | [
"#!/usr/bin/env python\n",
"'''\n",
"Plots the gravitational potential of the earth and moon\n",
"assumes the earth is at 0,0 and moon is on x-axis.\n",
"\n",
"Inspired by http://www.wired.com/2014/07/contour-plots-with-python-and-plotly/?mbid=social_twitter\n",
"and: https://gist.github.com/rhettallain/1aa12b44d59562ce08fc\n",
"\n",
"Practicing plotly\n",
"'''\n",
"\n",
"__author__ = 'julenka'\n",
"\n",
"import numpy as np\n",
"import plotly.plotly as py\n",
"import math\n",
"from plotly.graph_objs import *\n",
"\n",
"massEarth = 5.9729e24\n",
"massMoon = 7.3477e22\n",
"distanceFromEarthToMoon = 384400 # units is km\n",
"G = 6.67384e-11\n",
"\n",
"earthOffset = 100000\n",
"earthLocation = [earthOffset,earthOffset]\n",
"moonLocation = [earthOffset + distanceFromEarthToMoon/2,\n",
" earthOffset + math.sqrt(math.pow(distanceFromEarthToMoon,2) - math.pow(distanceFromEarthToMoon/2, 2))]\n",
"\n",
"# make mesh\n",
"step = 10000\n",
"x = np.arange(1, 2 * distanceFromEarthToMoon, step)\n",
"y = np.arange(1, 2 * distanceFromEarthToMoon, step)\n",
"X,Y = np.meshgrid(x,y)\n",
"\n",
"\n",
"# gravitational potential values\n",
"V = X * 0\n",
"Vmax = 4e10\n",
"\n",
"for r in range(len(X)):\n",
" for c in range(len(Y)):\n",
" currentLocation = np.array([X[r,c], Y[r,c]])\n",
"\n",
" distanceToEarth = np.linalg.norm(np.subtract(currentLocation, earthLocation))\n",
" gpFromEarth = G * massEarth / distanceToEarth #V(r) = Gm/r\n",
"\n",
" distanceToMoon = np.linalg.norm(np.subtract(currentLocation, moonLocation))\n",
" gpFromMoon = G * massEarth / distanceToMoon #V(r) = Gm/r\n",
"\n",
" totalGp = max(0, min(Vmax, gpFromEarth + gpFromMoon))\n",
" V[r,c] = totalGp\n",
"\n",
"data=[{'x':x, 'y':y, 'z':V, 'type':'contour'}]\n",
"plot_url = py.plot(data, filename='earth-moon gravitational potential')"
] | [
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0,
0,
0.023809523809523808,
0,
0.01680672268907563,
0,
0,
0,
0,
0,
0.08695652173913043,
0,
0,
0,
0,
0,
0,
0,
0,
0.03773584905660377,
0,
0.011627906976744186,
0.029850746268656716,
0,
0.011904761904761904,
0.03076923076923077,
0,
0,
0.04,
0,
0.10638297872340426,
0.014084507042253521
] | 54 | 0.008172 |
"""
EULA APP
This module provides additional functionality to the EULA app.
Classes:
EULAAcceptedMixin
Functions:
n/a
Created on 23 Oct 2013
@author: michael
"""
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from tunobase.eula import models, utils
class EULAAcceptedMixin(object):
"""Render EUAL to users."""
eula_url = 'eula_sign'
raise_exception = False
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
"""
Confirm that the EULA has been agreed to by the user
and if not render EULA.
"""
self.eula = get_object_or_404(models.EULA).latest_version()
# If the user has accepted the latest EULA
if not models.UserEULA.objects\
.filter(user=request.user, eula=self.eula)\
.exists():
if self.raise_exception:
raise PermissionDenied
else:
if hasattr(self, 'get_object'):
self.object = self.get_object()
eula_url_kwargs = {
'content_type_id': ContentType.objects\
.get_for_model(self.object).id,
'object_pk': self.object.pk
}
eula_url = reverse_lazy(
self.eula_url, kwargs=eula_url_kwargs
)
else:
eula_url = reverse_lazy(self.eula_url)
return utils.redirect_to_eula(
request.get_full_path(),
eula_url
)
return super(EULAAcceptedMixin, self).dispatch(
request,
*args,
**kwargs
)
| [
"\"\"\"\n",
"EULA APP\n",
"\n",
"This module provides additional functionality to the EUAL app.\n",
"\n",
"Classes:\n",
" EULAAcceptedMixin\n",
"\n",
"Functions:\n",
" n/a\n",
"\n",
"Created on 23 Oct 2013\n",
"\n",
"@author: michael\n",
"\n",
"\"\"\"\n",
"from django.core.exceptions import PermissionDenied\n",
"from django.core.urlresolvers import reverse_lazy\n",
"from django.contrib.auth.decorators import login_required\n",
"from django.contrib.contenttypes.models import ContentType\n",
"from django.shortcuts import get_object_or_404\n",
"from django.utils.decorators import method_decorator\n",
"\n",
"from tunobase.eula import models, utils\n",
"\n",
"class EULAAcceptedMixin(object):\n",
" \"\"\"Render EUAL to users.\"\"\"\n",
"\n",
" eula_url = 'eula_sign'\n",
" raise_exception = False\n",
"\n",
" @method_decorator(login_required)\n",
" def dispatch(self, request, *args, **kwargs):\n",
" \"\"\"\n",
" Confirm that the EULA has been agreed to by the user\n",
" and if not render EULA.\n",
"\n",
" \"\"\"\n",
" self.eula = get_object_or_404(models.EULA).latest_version()\n",
"\n",
" # If the user has accepted the latest EULA\n",
" if not models.UserEULA.objects\\\n",
" .filter(user=request.user, eula=self.eula)\\\n",
" .exists():\n",
" if self.raise_exception:\n",
" raise PermissionDenied\n",
" else:\n",
" if hasattr(self, 'get_object'):\n",
" self.object = self.get_object()\n",
" eula_url_kwargs = {\n",
" 'content_type_id': ContentType.objects\\\n",
" .get_for_model(self.object).id,\n",
" 'object_pk': self.object.pk\n",
" }\n",
" eula_url = reverse_lazy(\n",
" self.eula_url, kwargs=eula_url_kwargs\n",
" )\n",
" else:\n",
" eula_url = reverse_lazy(self.eula_url)\n",
"\n",
" return utils.redirect_to_eula(\n",
" request.get_full_path(),\n",
" eula_url\n",
" )\n",
"\n",
" return super(EULAAcceptedMixin, self).dispatch(\n",
" request,\n",
" *args,\n",
" **kwargs\n",
" )\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015625,
0.015625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 70 | 0.000879 |
"""Fixtures for tests in this directory."""
import multiprocessing
import multiprocessing.queues
import sys
import httpretty
import pytest
from sphinx import build_main
def run_build_main(docs_dir, html_dir, overflow):
"""Run build_main().
:param str docs_dir: Path to input docs directory.
:param str html_dir: Path to output html directory.
:param iter overflow: Append these args to sphinx-build call.
:return: Value from build_main().
:rtype: int
"""
argv = ('sphinx-build', str(docs_dir), str(html_dir))
if overflow:
argv += overflow
result = build_main(argv)
return result
def run_build_main_post_multiprocessing(docs_dir, html_dir, cached_responses, queue, overflow):
"""Run Sphinx's build_main after setting up httpretty mock responses. Called by multiprocess.Process.
Need to use this instead of httpretty pytest fixtures since forking doesn't exist in Windows and multiprocess runs
in "spawn" mode. This means that everything setup by pytest is lost since subprocesses are generated from scratch on
Windows.
:raise: RuntimeError on Sphinx non-zero exit. This causes multiprocessing.Process().exitcode to be != 0.
:param str docs_dir: Path to input docs directory.
:param str html_dir: Path to output html directory.
:param dict cached_responses: URL keys and serialized JSON values.
:param multiprocessing.queues.Queue queue: Queue to transmit stdout/err back to parent process.
:param iter overflow: Append these args to sphinx-build call.
"""
# Capture stdout/stderr after forking/spawning.
capture = __import__('_pytest').capture
try:
capsys = capture.CaptureFixture(capture.SysCapture)
except TypeError:
capsys = capture.CaptureFixture(capture.SysCapture, None)
getattr(capsys, '_start')()
# Re-run httpretty on Windows (due to lack of forking).
if sys.platform == 'win32':
httpretty.enable()
if cached_responses:
for url, body in cached_responses.items():
httpretty.register_uri(httpretty.GET, url, body=body)
# Run.
result = run_build_main(docs_dir, html_dir, overflow)
stdout, stderr = capsys.readouterr()
queue.put((stdout, stderr))
if result != 0:
raise RuntimeError(result, stdout, stderr)
def pytest_namespace():
"""Add objects to the pytest namespace.
E.g. Returning {'func': lambda: True} allows import pytest; assert pytest.func() is True.
:return: Namespace names and objects.
:rtype: dict
"""
def add_page(root, name, append=''):
"""Add a page to the sample Sphinx docs.
:param py.path.local root: Path to docs root dir.
:param str name: Page name.
:param str append: Append text to RST document body.
:return: Path to new page RST file.
:rtype: py.path.local
"""
root.join('contents.rst').write(' {}\n'.format(name), mode='a')
page = root.join('{}.rst'.format(name))
page.write('.. _{}:\n\n{}\n{}\n\n{}'.format(name, name.capitalize(), '=' * len(name), append))
return page
def build_isolated(docs_dir, html_dir, cached_responses, overflow=None):
"""Run build_main() through multiprocessing.Process.
:param str docs_dir: Path to input docs directory.
:param str html_dir: Path to output html directory.
:param dict cached_responses: URL keys and serialized JSON values.
:param iter overflow: Append these args to sphinx-build call.
:return: Exit code of subprocess, stdout, and stderr.
:rtype: tuple
"""
queue = multiprocessing.Queue()
args = docs_dir, html_dir, cached_responses, queue, overflow
child = multiprocessing.Process(target=run_build_main_post_multiprocessing, args=args)
child.start()
child.join()
result = child.exitcode
try:
stdout, stderr = queue.get(False)
except multiprocessing.queues.Empty:
stdout, stderr = '', ''
return result, stdout, stderr
return dict(add_page=add_page, build_isolated=build_isolated)
@pytest.fixture
def docs(tmpdir):
"""Create sample docs used in this test module.
:param tmpdir: pytest fixture.
:return: Path to docs root.
:rtype: py.path
"""
root = tmpdir.ensure_dir('docs')
# Create Sphinx config.
root.join('conf.py').write("extensions = ['sphinxcontrib.imgur']\nimgur_client_id = 'a0b1c2d3e4f56789'\n")
# Create Sphinx docs.
root.join('contents.rst').write(
'Test\n'
'====\n'
'\n'
'Sample documentation.\n'
'\n'
'.. toctree::\n'
' ignore\n'
)
root.join('ignore.rst').write('.. _ignore:\n\nIgnore\n======\n\nHello World.\n')
return root
| [
"\"\"\"Fixtures for tests in this directory.\"\"\"\n",
"\n",
"import multiprocessing\n",
"import multiprocessing.queues\n",
"import sys\n",
"\n",
"import httpretty\n",
"import pytest\n",
"from sphinx import build_main\n",
"\n",
"\n",
"def run_build_main(docs_dir, html_dir, overflow):\n",
" \"\"\"Run build_main().\n",
"\n",
" :param str docs_dir: Path to input docs directory.\n",
" :param str html_dir: Path to output html directory.\n",
" :param iter overflow: Append these args to sphinx-build call.\n",
"\n",
" :return: Value from build_main().\n",
" :rtype: int\n",
" \"\"\"\n",
" argv = ('sphinx-build', str(docs_dir), str(html_dir))\n",
" if overflow:\n",
" argv += overflow\n",
" result = build_main(argv)\n",
" return result\n",
"\n",
"\n",
"def run_build_main_post_multiprocessing(docs_dir, html_dir, cached_responses, queue, overflow):\n",
" \"\"\"Run Sphinx's build_main after setting up httpretty mock responses. Called by multiprocess.Process.\n",
"\n",
" Need to use this instead of httpretty pytest fixtures since forking doesn't exist in Windows and multiprocess runs\n",
" in \"spawn\" mode. This means that everything setup by pytest is lost since subprocesses are generated from scratch on\n",
" Windows.\n",
"\n",
" :raise: RuntimeError on Sphinx non-zero exit. This causes multiprocessing.Process().exitcode to be != 0.\n",
"\n",
" :param str docs_dir: Path to input docs directory.\n",
" :param str html_dir: Path to output html directory.\n",
" :param dict cached_responses: URL keys and serialized JSON values.\n",
" :param multiprocessing.queues.Queue queue: Queue to transmit stdout/err back to parent process.\n",
" :param iter overflow: Append these args to sphinx-build call.\n",
" \"\"\"\n",
" # Capture stdout/stderr after forking/spawning.\n",
" capture = __import__('_pytest').capture\n",
" try:\n",
" capsys = capture.CaptureFixture(capture.SysCapture)\n",
" except TypeError:\n",
" capsys = capture.CaptureFixture(capture.SysCapture, None)\n",
" getattr(capsys, '_start')()\n",
"\n",
" # Re-run httpretty on Windows (due to lack of forking).\n",
" if sys.platform == 'win32':\n",
" httpretty.enable()\n",
" if cached_responses:\n",
" for url, body in cached_responses.items():\n",
" httpretty.register_uri(httpretty.GET, url, body=body)\n",
"\n",
" # Run.\n",
" result = run_build_main(docs_dir, html_dir, overflow)\n",
" stdout, stderr = capsys.readouterr()\n",
" queue.put((stdout, stderr))\n",
" if result != 0:\n",
" raise RuntimeError(result, stdout, stderr)\n",
"\n",
"\n",
"def pytest_namespace():\n",
" \"\"\"Add objects to the pytest namespace.\n",
"\n",
" E.g. Returning {'func': lambda: True} allows import pytest; assert pytest.func() is True.\n",
"\n",
" :return: Namespace names and objects.\n",
" :rtype: dict\n",
" \"\"\"\n",
" def add_page(root, name, append=''):\n",
" \"\"\"Add a page to the sample Sphinx docs.\n",
"\n",
" :param py.path.local root: Path to docs root dir.\n",
" :param str name: Page name.\n",
" :param str append: Append text to RST document body.\n",
"\n",
" :return: Path to new page RST file.\n",
" :rtype: py.path.local\n",
" \"\"\"\n",
" root.join('contents.rst').write(' {}\\n'.format(name), mode='a')\n",
" page = root.join('{}.rst'.format(name))\n",
" page.write('.. _{}:\\n\\n{}\\n{}\\n\\n{}'.format(name, name.capitalize(), '=' * len(name), append))\n",
" return page\n",
"\n",
" def build_isolated(docs_dir, html_dir, cached_responses, overflow=None):\n",
" \"\"\"Run build_main() through multiprocessing.Process.\n",
"\n",
" :param str docs_dir: Path to input docs directory.\n",
" :param str html_dir: Path to output html directory.\n",
" :param dict cached_responses: URL keys and serialized JSON values.\n",
" :param iter overflow: Append these args to sphinx-build call.\n",
"\n",
" :return: Exit code of subprocess, stdout, and stderr.\n",
" :rtype: tuple\n",
" \"\"\"\n",
" queue = multiprocessing.Queue()\n",
" args = docs_dir, html_dir, cached_responses, queue, overflow\n",
" child = multiprocessing.Process(target=run_build_main_post_multiprocessing, args=args)\n",
" child.start()\n",
" child.join()\n",
" result = child.exitcode\n",
" try:\n",
" stdout, stderr = queue.get(False)\n",
" except multiprocessing.queues.Empty:\n",
" stdout, stderr = '', ''\n",
" return result, stdout, stderr\n",
"\n",
" return dict(add_page=add_page, build_isolated=build_isolated)\n",
"\n",
"\n",
"@pytest.fixture\n",
"def docs(tmpdir):\n",
" \"\"\"Create sample docs used in this test module.\n",
"\n",
" :param tmpdir: pytest fixture.\n",
"\n",
" :return: Path to docs root.\n",
" :rtype: py.path\n",
" \"\"\"\n",
" root = tmpdir.ensure_dir('docs')\n",
"\n",
" # Create Sphinx config.\n",
" root.join('conf.py').write(\"extensions = ['sphinxcontrib.imgur']\\nimgur_client_id = 'a0b1c2d3e4f56789'\\n\")\n",
"\n",
" # Create Sphinx docs.\n",
" root.join('contents.rst').write(\n",
" 'Test\\n'\n",
" '====\\n'\n",
" '\\n'\n",
" 'Sample documentation.\\n'\n",
" '\\n'\n",
" '.. toctree::\\n'\n",
" ' ignore\\n'\n",
" )\n",
" root.join('ignore.rst').write('.. _ignore:\\n\\nIgnore\\n======\\n\\nHello World.\\n')\n",
"\n",
" return root\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0.009433962264150943,
0,
0.008403361344537815,
0.008264462809917356,
0,
0,
0.009174311926605505,
0,
0,
0,
0,
0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0
] | 142 | 0.000756 |
# Copyright 2017 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
LOG = logging.getLogger(__name__)
# A list of features and their supported microversions. Note that these are
# explicit functioning versions, not a range.
# There should be a minimum of two versions per feature. The first entry in
# this list should always be the lowest possible API microversion for a
# feature i.e. the version at which that feature was introduced. The second
# entry should be the current service version when the feature was added to
# horizon.
# Further documentation can be found at
# https://docs.openstack.org/horizon/latest/contributor/topics/
# microversion_support.html
MICROVERSION_FEATURES = {
"nova": {
"locked_attribute": ["2.9", "2.42"],
"instance_description": ["2.19", "2.42"],
"remote_console_mks": ["2.8", "2.53"],
"servergroup_soft_policies": ["2.15", "2.60"],
"servergroup_user_info": ["2.13", "2.60"],
"multiattach": ["2.60"],
"auto_allocated_network": ["2.37", "2.42"],
"key_types": ["2.2", "2.9"],
"key_type_list": ["2.9"],
},
"cinder": {
"groups": ["3.27", "3.43", "3.48", "3.58"],
"consistency_groups": ["2.0", "3.10"],
"message_list": ["3.5", "3.29"],
"limits_project_id_query": ["3.43", "3.50", "3.55"],
}
}
class MicroVersionNotFound(Exception):
def __init__(self, features):
self.features = features
def __str__(self):
return "Insufficient microversion for %s" % self.features
def get_requested_versions(service, features):
if not features:
return None
# Convert a single feature string into a list for backward compatibility.
if isinstance(features, str):
features = [features]
try:
service_features = MICROVERSION_FEATURES[service]
except KeyError:
LOG.debug("'%s' could not be found in the MICROVERSION_FEATURES dict",
service)
return None
feature_versions = set(service_features[features[0]])
for feature in features[1:]:
feature_versions &= set(service_features[feature])
if not feature_versions:
return None
# Sort version candidates from larger versions
feature_versions = sorted(feature_versions, reverse=True,
key=lambda v: [int(i) for i in v.split('.')])
return feature_versions
# NOTE(robcresswell): Since each client implements their own wrapper class for
# API objects, we'll need to allow that to be passed in. In the future this
# should be replaced by some common handling in Oslo.
def get_microversion_for_features(service, features, wrapper_class,
min_ver, max_ver):
"""Retrieves that highest known functional microversion for features"""
feature_versions = get_requested_versions(service, features)
if not feature_versions:
return None
for version in feature_versions:
microversion = wrapper_class(version)
if microversion.matches(min_ver, max_ver):
return microversion
return None
| [
"# Copyright 2017 Cisco Systems\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"import logging\n",
"\n",
"LOG = logging.getLogger(__name__)\n",
"\n",
"# A list of features and their supported microversions. Note that these are\n",
"# explicit functioning versions, not a range.\n",
"# There should be a minimum of two versions per feature. The first entry in\n",
"# this list should always be the lowest possible API microversion for a\n",
"# feature i.e. the version at which that feature was introduced. The second\n",
"# entry should be the current service version when the feature was added to\n",
"# horizon.\n",
"# Further documentation can be found at\n",
"# https://docs.openstack.org/horizon/latest/contributor/topics/\n",
"# microversion_support.html\n",
"MICROVERSION_FEATURES = {\n",
" \"nova\": {\n",
" \"locked_attribute\": [\"2.9\", \"2.42\"],\n",
" \"instance_description\": [\"2.19\", \"2.42\"],\n",
" \"remote_console_mks\": [\"2.8\", \"2.53\"],\n",
" \"servergroup_soft_policies\": [\"2.15\", \"2.60\"],\n",
" \"servergroup_user_info\": [\"2.13\", \"2.60\"],\n",
" \"multiattach\": [\"2.60\"],\n",
" \"auto_allocated_network\": [\"2.37\", \"2.42\"],\n",
" \"key_types\": [\"2.2\", \"2.9\"],\n",
" \"key_type_list\": [\"2.9\"],\n",
" },\n",
" \"cinder\": {\n",
" \"groups\": [\"3.27\", \"3.43\", \"3.48\", \"3.58\"],\n",
" \"consistency_groups\": [\"2.0\", \"3.10\"],\n",
" \"message_list\": [\"3.5\", \"3.29\"],\n",
" \"limits_project_id_query\": [\"3.43\", \"3.50\", \"3.55\"],\n",
" }\n",
"}\n",
"\n",
"\n",
"class MicroVersionNotFound(Exception):\n",
" def __init__(self, features):\n",
" self.features = features\n",
"\n",
" def __str__(self):\n",
" return \"Insufficient microversion for %s\" % self.features\n",
"\n",
"\n",
"def get_requested_versions(service, features):\n",
" if not features:\n",
" return None\n",
" # Convert a single feature string into a list for backward compatibility.\n",
" if isinstance(features, str):\n",
" features = [features]\n",
" try:\n",
" service_features = MICROVERSION_FEATURES[service]\n",
" except KeyError:\n",
" LOG.debug(\"'%s' could not be found in the MICROVERSION_FEATURES dict\",\n",
" service)\n",
" return None\n",
"\n",
" feature_versions = set(service_features[features[0]])\n",
" for feature in features[1:]:\n",
" feature_versions &= set(service_features[feature])\n",
" if not feature_versions:\n",
" return None\n",
" # Sort version candidates from larger versins\n",
" feature_versions = sorted(feature_versions, reverse=True,\n",
" key=lambda v: [int(i) for i in v.split('.')])\n",
" return feature_versions\n",
"\n",
"\n",
"# NOTE(robcresswell): Since each client implements their own wrapper class for\n",
"# API objects, we'll need to allow that to be passed in. In the future this\n",
"# should be replaced by some common handling in Oslo.\n",
"def get_microversion_for_features(service, features, wrapper_class,\n",
" min_ver, max_ver):\n",
" \"\"\"Retrieves that highest known functional microversion for features\"\"\"\n",
" feature_versions = get_requested_versions(service, features)\n",
" if not feature_versions:\n",
" return None\n",
"\n",
" for version in feature_versions:\n",
" microversion = wrapper_class(version)\n",
" if microversion.matches(min_ver, max_ver):\n",
" return microversion\n",
" return None\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 96 | 0 |
import numpy
from chainer.backends import cuda
from chainer.functions.loss import black_out
from chainer import link
from chainer.utils import walker_alias
from chainer import variable
class BlackOut(link.Link):
"""BlackOut loss layer.
.. seealso:: :func:`~chainer.functions.black_out` for more detail.
Args:
in_size (int): Dimension of input vectors.
counts (int list): Number of each identifier.
sample_size (int): Number of negative samples.
Attributes:
W (~chainer.Parameter): Weight parameter matrix.
"""
sample_data = None
def __init__(self, in_size, counts, sample_size):
super(BlackOut, self).__init__()
vocab_size = len(counts)
p = numpy.array(counts, dtype=numpy.float32)
self.sampler = walker_alias.WalkerAlias(p)
self.sample_size = sample_size
with self.init_scope():
self.W = variable.Parameter(shape=(vocab_size, in_size))
def to_cpu(self):
super(BlackOut, self).to_cpu()
self.sampler.to_cpu()
def to_gpu(self, device=None):
with cuda._get_device(device):
super(BlackOut, self).to_gpu()
self.sampler.to_gpu()
def forward(self, x, t):
"""Computes the loss value for given input and ground truth labels.
Args:
x (~chainer.Variable): Input of the weight matrix multiplication.
t (~chainer.Variable): Batch of ground truth labels.
Returns:
~chainer.Variable: Loss value.
"""
batch_size = x.shape[0]
if self.sample_data is not None:
# for test
sample_data = self.sample_data
else:
shape = (batch_size, self.sample_size)
sample_data = self.sampler.sample(shape)
samples = variable.Variable(sample_data)
return black_out.black_out(x, t, self.W, samples)
| [
"import numpy\n",
"\n",
"from chainer.backends import cuda\n",
"from chainer.functions.loss import black_out\n",
"from chainer import link\n",
"from chainer.utils import walker_alias\n",
"from chainer import variable\n",
"\n",
"\n",
"class BlackOut(link.Link):\n",
"\n",
" \"\"\"BlackOut loss layer.\n",
"\n",
" .. seealso:: :func:`~chainer.functions.black_out` for more detail.\n",
"\n",
" Args:\n",
" in_size (int): Dimension of input vectors.\n",
" counts (int list): Number of each identifiers.\n",
" sample_size (int): Number of negative samples.\n",
"\n",
" Attributes:\n",
" W (~chainer.Parameter): Weight parameter matrix.\n",
"\n",
" \"\"\"\n",
"\n",
" sample_data = None\n",
"\n",
" def __init__(self, in_size, counts, sample_size):\n",
" super(BlackOut, self).__init__()\n",
" vocab_size = len(counts)\n",
" p = numpy.array(counts, dtype=numpy.float32)\n",
" self.sampler = walker_alias.WalkerAlias(p)\n",
" self.sample_size = sample_size\n",
"\n",
" with self.init_scope():\n",
" self.W = variable.Parameter(shape=(vocab_size, in_size))\n",
"\n",
" def to_cpu(self):\n",
" super(BlackOut, self).to_cpu()\n",
" self.sampler.to_cpu()\n",
"\n",
" def to_gpu(self, device=None):\n",
" with cuda._get_device(device):\n",
" super(BlackOut, self).to_gpu()\n",
" self.sampler.to_gpu()\n",
"\n",
" def forward(self, x, t):\n",
" \"\"\"Computes the loss value for given input and ground truth labels.\n",
"\n",
" Args:\n",
" x (~chainer.Variable): Input of the weight matrix multiplication.\n",
" t (~chainer.Variable): Batch of ground truth labels.\n",
"\n",
" Returns:\n",
" ~chainer.Variable: Loss value.\n",
"\n",
" \"\"\"\n",
"\n",
" batch_size = x.shape[0]\n",
" if self.sample_data is not None:\n",
" # for test\n",
" sample_data = self.sample_data\n",
" else:\n",
" shape = (batch_size, self.sample_size)\n",
" sample_data = self.sampler.sample(shape)\n",
" samples = variable.Variable(sample_data)\n",
" return black_out.black_out(x, t, self.W, samples)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 67 | 0 |
#!/usr/bin/python
#
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commmon helper methods for creating an SSH connection."""
import cStringIO
import push_exceptions as exceptions
import gflags
import logging
import paramiko
import socket
import threading
gflags.DEFINE_string('paramiko_ssh_config',
'',
'Use this file to pass options using the same format as '
'OpenSSH.')
FLAGS = gflags.FLAGS
TIMEOUT_DEFAULT = 20.0
class Error(Exception):
pass
class ScpError(Error):
"""An error occurred while attempting a SCP copy."""
class ScpTimeoutError(ScpError):
"""A device failed to respond to a SCP command within the timeout."""
class ScpMinorError(ScpError):
"""A device reported a SCP minor error."""
class ScpMajorError(ScpError):
"""A device reported a SCP major error."""
class ScpProtocolError(ScpError):
"""An unexpected SCP error occurred."""
class ScpChannelError(ScpError):
"""An error occurred with the SCP channel."""
class ScpClosedError(ScpError):
"""A device closed the SCP connection."""
class SshConfigError(ScpError):
"""The configuration file is either missing or malformed."""
class SshOptions(object):
"""Singleton wrapper class around the SSH configuration.
This class creates an SSHOption object if the command line flag
--paramiko_ssh_config was found and stores the result for future
use. Since this class is called from several threads, it uses a lock
to protect concurrent attempts to load the configuration.
"""
_lock = threading.Lock()
_need_init = True
_ssh_options = None
def __init__(self):
"""Read the configuration if present and store it for later.
Check if the flag --paramiko_ssh_config was set and parse the
configuration file.
"""
# This flag may be set by another thread concurrently. We will
# check the value again under a lock.
if SshOptions._need_init:
try:
with SshOptions._lock:
if SshOptions._need_init and FLAGS.paramiko_ssh_config:
logging.debug(
'Reading configuration from %s', FLAGS.paramiko_ssh_config)
try:
configfile = open(FLAGS.paramiko_ssh_config)
ssh_config = paramiko.SSHConfig()
ssh_config.parse(configfile)
SshOptions._ssh_options = ssh_config
except Exception as e: # pylint: disable=broad-except
# Unfortunately paramiko raises "Exception" if there is an
# error in the config file.
logging.fatal('Unable to read or parse "%s": %s',
FLAGS.paramiko_ssh_config, e)
finally:
SshOptions._need_init = False
def Lookup(self, hostname, port, username):
"""Translate the hostname, port and username using the configuration.
If the port is not defined, 22 is used. If the username is not
defined and no option override it, it will remain undefined.
Args:
hostname: A string, the hostname to use as the key for searching the
configuration.
port: An integer, the TCP port to used to reach the device. If not
defined, the default value (22) will be returned.
username: A string, the username to use to connect to the device. It
will only be overridden if not defined.
Returns:
A tuple of (string, int, string) containing the new (hostname, port,
username).
"""
new_hostname = hostname
new_port = port
new_username = username
if SshOptions._ssh_options:
# We can't arrive here without first executing __init__, so we
# can assume that the _ssh_option is set and we don't need a
# lock since we're only doing readonly accesses.
host_config = SshOptions._ssh_options.lookup(hostname)
if host_config:
if 'hostname' in host_config:
new_hostname = host_config['hostname']
if (not new_port or new_port == 22) and 'port' in host_config:
try:
new_port = int(host_config['port'])
except ValueError:
raise SshConfigError('Invalid port value %s for %s' %
(host_config['port'], hostname))
if not new_username and 'user' in host_config:
new_username = host_config['user']
logging.debug(
'Translating %s:%s to %s:%s', hostname, port, new_hostname,
new_port)
if not new_port:
new_port = 22
return (new_hostname, new_port, new_username)
def Connect(hostname, username, password=None, port=22, ssh_keys=(),
timeout=TIMEOUT_DEFAULT):
"""Makes a paramiko SSH connection to a device.
Args:
hostname: A string, the hostname or IP address to connect to.
username: A string, the username to use on the connection.
password: A string, the password to use on the connection.
port: An int, the port number to connect to.
ssh_keys: A tuple of strings, SSH private keys (optional; may be None).
timeout: A float, the number of seconds before a connection times out.
Returns:
A paramiko.SSHClient() instance
"""
options = SshOptions()
hostname, port, username = options.Lookup(hostname, port, username)
ssh_client = None
def RaiseError(e, msg):
"""Raises an exception, disconnecting the SSH client.
Args:
e: An Exception.
msg: An object, exception arguments.
"""
raise e(msg)
try:
ssh_client = paramiko.SSHClient()
# Always auto-add remote SSH host keys.
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.load_system_host_keys()
# Connect using paramiko with a timeout parameter (requires paramiko 1.7)
if ssh_keys:
pkeys = []
for key in ssh_keys:
logging.debug('Using SSH private key for device authentication.')
# Use a virtual temporary file to store the key.
ssh_key_fileobj = cStringIO.StringIO()
ssh_key_fileobj.write(key)
ssh_key_fileobj.reset()
try:
pkeys.append(paramiko.DSSKey(file_obj=ssh_key_fileobj))
logging.debug('Using SSH DSA key for %r', hostname)
except (IndexError, paramiko.SSHException) as e:
if (isinstance(e, IndexError) or
'not a valid DSA private key file' in str(e)):
ssh_key_fileobj.reset()
try:
logging.debug('Using SSH RSA key for %r', hostname)
pkeys.append(paramiko.RSAKey(file_obj=ssh_key_fileobj))
except (IndexError, paramiko.SSHException) as e:
raise exceptions.AuthenticationError(str(e))
else:
raise exceptions.ConnectError('SSHException: %s' % str(e))
else:
logging.debug('Using password for %r', hostname)
pkeys = [None]
for pkey in pkeys:
saved_exception = None
try:
ssh_client.connect(hostname=hostname,
port=port,
username=username,
password=password,
pkey=pkey,
timeout=timeout,
allow_agent=False,
look_for_keys=False)
break
except (paramiko.AuthenticationException, paramiko.SSHException) as e:
saved_exception = e
if saved_exception is not None:
raise saved_exception # pylint: disable=raising-bad-type
transport = ssh_client.get_transport()
# Sometimes we have to authenticate a second time, eg. on Force10
# we always fail the first authentication (if we try pkey + pass,
# the pass succeeds; but if we do pass only, we have to do it
# twice). connect() above will have authenticated once.
if not transport.is_authenticated():
if pkeys != [None]:
for pkey in pkeys:
try:
transport.auth_publickey(username, pkey)
break
except paramiko.SSHException:
pass
if not transport.is_authenticated():
if password is not None:
try:
transport.auth_password(username, password)
except paramiko.SSHException:
pass
if not transport.is_authenticated():
msg = 'Not authenticated after two attempts on %r' % hostname
RaiseError(exceptions.ConnectError, msg)
except EOFError:
msg = 'EOFError connecting to: %r' % hostname
RaiseError(exceptions.ConnectError, msg)
except paramiko.AuthenticationException as e:
msg = 'Authentication error connecting to %s: %s' % (hostname, str(e))
RaiseError(exceptions.AuthenticationError, msg)
except paramiko.SSHException as e:
msg = 'SSHException connecting to %s: %s' % (hostname, str(e))
RaiseError(exceptions.ConnectError, msg)
except socket.timeout as e:
msg = 'Timed-out while connecting to %s: %s' % (hostname, str(e))
RaiseError(exceptions.ConnectError, msg)
except socket.error as e:
msg = 'Socket error connecting to %r: %s %s' % (hostname, e.__class__, e)
RaiseError(exceptions.ConnectError, msg)
return ssh_client
def _ScpRecvResponse(channel):
"""Receives a response on a SCP channel.
Args:
channel: A Paramiko channel object.
Raises:
ScpClosedError: If the device has closed the connection.
ScpMajorError: If the device reports a major error.
ScpMinorError: If the device reports a minor error.
ScpProtocolError: If an unexpected error occurs.
ScpTimeoutError: If no response is received within the timeout.
"""
buf = channel.recv(1)
while True:
if channel.recv_stderr_ready():
# Dodgy: Cisco sometimes *ask* for a password, but they don't actually
err = channel.recv_stderr(512)
if err == 'Password: ':
logging.warn('Password prompt received on SCP stderr, assuming '
'IOS bug (ignoring)')
else:
raise ScpProtocolError('Data on stderr: %r' % err)
if not buf:
raise ScpClosedError('Connection closed by remote device')
if buf == '\x00':
# Code \x00 indicates success. Brocade have been observed sending
# \x00\x02 followed by an error message, so we need to only read
# the single \x00 and leave the error message to be handled in a
# future call to _ScpRecvResponse.
return
try:
extra = channel.recv(512)
if not extra:
raise ScpProtocolError(
'Connection closed by remote device; partial response: %r' % buf)
else:
buf += extra
except socket.timeout:
if buf:
raise ScpProtocolError(
'Timed out reading from socket; partial response: %r' % buf)
else:
raise ScpTimeoutError('Timed out reading from socket')
if buf[-1] == '\n':
if buf[0] == '\x01':
if buf.startswith('\x01File ') and buf.rstrip().endswith(
'created successfully.'):
return
raise ScpMinorError(buf[1:-1])
elif buf[0] == '\x02':
# Code \x02: Fatal error.
raise ScpMajorError(buf[1:-1])
else:
# Default case: Fatal error.
raise ScpMajorError(buf[:-1])
def ScpPut(transport, source_data, destination_file, timeout, send_buffer=8192):
"""Puts a file via SCP protocol.
Args:
transport: A Paramiko transport object.
source_data: The source data to copy as a string.
destination_file: The file on the remote device.
timeout: The timeout to use for the SCP channel.
send_buffer: The number of bytes to send in each operation.
Raises:
ConnectionError: There was an error trying to start the SCP connection.
ScpError: There was an error copying the file.
"""
channel = transport.open_session()
try:
channel.settimeout(timeout)
channel.exec_command('scp -t %s' % destination_file)
# Server must acknowledge our connection.
_ScpRecvResponse(channel)
# Send file attributes, length and a dummy source file basename.
source_size = len(source_data)
channel.sendall('C0644 %d 1\n' % source_size)
# Server must acknowledge our request to send.
_ScpRecvResponse(channel)
# Send the data in chunks rather than all at once
pos = 0
while pos < source_size:
channel.sendall(source_data[pos:pos + send_buffer])
pos += send_buffer
# Indicate that we experienced no errors while sending.
channel.sendall('\0')
# Get the final status back from the device. Note: Force10 actually sends
# final status prior to getting the "all OK" from us.
_ScpRecvResponse(channel)
finally:
try:
channel.close()
except EOFError:
raise ScpChannelError('Error closing SCP channel')
| [
"#!/usr/bin/python\n",
"#\n",
"# Copyright 2013 Google Inc.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\"\"\"Commmon helper methods for creating an SSH connection.\"\"\"\n",
"\n",
"import cStringIO\n",
"import push_exceptions as exceptions\n",
"import gflags\n",
"import logging\n",
"import paramiko\n",
"import socket\n",
"import threading\n",
"\n",
"\n",
"gflags.DEFINE_string('paramiko_ssh_config',\n",
" '',\n",
" 'Use this file to pass options using the same format as '\n",
" 'OpenSSH.')\n",
"\n",
"FLAGS = gflags.FLAGS\n",
"\n",
"TIMEOUT_DEFAULT = 20.0\n",
"\n",
"\n",
"class Error(Exception):\n",
" pass\n",
"\n",
"\n",
"class ScpError(Error):\n",
" \"\"\"An error occurred while attempting a SCP copy.\"\"\"\n",
"\n",
"\n",
"class ScpTimeoutError(ScpError):\n",
" \"\"\"A device failed to respond to a SCP command within the timeout.\"\"\"\n",
"\n",
"\n",
"class ScpMinorError(ScpError):\n",
" \"\"\"A device reported a SCP minor error.\"\"\"\n",
"\n",
"\n",
"class ScpMajorError(ScpError):\n",
" \"\"\"A device reported a SCP major error.\"\"\"\n",
"\n",
"\n",
"class ScpProtocolError(ScpError):\n",
" \"\"\"An unexpected SCP error occurred.\"\"\"\n",
"\n",
"\n",
"class ScpChannelError(ScpError):\n",
" \"\"\"An error occurred with the SCP channel.\"\"\"\n",
"\n",
"\n",
"class ScpClosedError(ScpError):\n",
" \"\"\"A device closed the SCP connection.\"\"\"\n",
"\n",
"\n",
"class SshConfigError(ScpError):\n",
" \"\"\"The configuration file is either missing or malformed.\"\"\"\n",
"\n",
"\n",
"class SshOptions(object):\n",
" \"\"\"Singleton wrapper class around the SSH configuration.\n",
"\n",
" This class creates a SSHOption object if the command line flag\n",
" --paramiko_ssh_config was found and store the result for future\n",
" use. Since this class is called from several threads, it uses a lock\n",
" to protect concurrent attempts to load the configuration.\n",
" \"\"\"\n",
" _lock = threading.Lock()\n",
" _need_init = True\n",
" _ssh_options = None\n",
"\n",
" def __init__(self):\n",
" \"\"\"Read the configuration if present and store it for later.\n",
"\n",
" Check if the flag --paramiko_ssh_config was set and parse the\n",
" configuration file.\n",
" \"\"\"\n",
"\n",
" # This flag may be set by another thread concurrently. We will\n",
" # check the value again under a lock.\n",
" if SshOptions._need_init:\n",
" try:\n",
" with SshOptions._lock:\n",
" if SshOptions._need_init and FLAGS.paramiko_ssh_config:\n",
" logging.debug(\n",
" 'Reading configuration from %s', FLAGS.paramiko_ssh_config)\n",
"\n",
" try:\n",
" configfile = open(FLAGS.paramiko_ssh_config)\n",
" ssh_config = paramiko.SSHConfig()\n",
" ssh_config.parse(configfile)\n",
" SshOptions._ssh_options = ssh_config\n",
" except Exception as e: # pylint: disable=broad-except\n",
" # Unfortunately paramiko raises \"Exception\" if there is an\n",
" # error in the config file.\n",
" logging.fatal('Unable to read or parse \"%s\": %s',\n",
" FLAGS.paramiko_ssh_config, e)\n",
" finally:\n",
" SshOptions._need_init = False\n",
"\n",
" def Lookup(self, hostname, port, username):\n",
" \"\"\"Translate the hostname, port and username using the configuration.\n",
"\n",
" If the port is not defined, 22 is used. If the username is not\n",
" defined and no option override it, it will remain undefined.\n",
"\n",
" Args:\n",
" hostname: A string, the hostname to use as the key for searching the\n",
" configuration.\n",
" port: An integer, the TCP port to used to reach the device. If not\n",
" defined, the default value (22) will be returned.\n",
" username: A string, the username to use to connect to the device. It\n",
" will only be overridden if not defined.\n",
" Returns:\n",
" A tuple of (string, int, string) containing the new (hostname, port,\n",
" username).\n",
" \"\"\"\n",
"\n",
" new_hostname = hostname\n",
" new_port = port\n",
" new_username = username\n",
"\n",
" if SshOptions._ssh_options:\n",
" # We can't arrive here without first executing __init__, so we\n",
" # can assume that the _ssh_option is set and we don't need a\n",
" # lock since we're only doing readonly accesses.\n",
" host_config = SshOptions._ssh_options.lookup(hostname)\n",
" if host_config:\n",
" if 'hostname' in host_config:\n",
" new_hostname = host_config['hostname']\n",
"\n",
" if (not new_port or new_port == 22) and 'port' in host_config:\n",
" try:\n",
" new_port = int(host_config['port'])\n",
" except ValueError:\n",
" raise SshConfigError('Invalid port value %s for %s' %\n",
" (host_config['port'], hostname))\n",
"\n",
" if not new_username and 'user' in host_config:\n",
" new_username = host_config['user']\n",
"\n",
" logging.debug(\n",
" 'Translating %s:%s to %s:%s', hostname, port, new_hostname,\n",
" new_port)\n",
"\n",
" if not new_port:\n",
" new_port = 22\n",
"\n",
" return (new_hostname, new_port, new_username)\n",
"\n",
"\n",
"def Connect(hostname, username, password=None, port=22, ssh_keys=(),\n",
" timeout=TIMEOUT_DEFAULT):\n",
" \"\"\"Makes a paramiko SSH connection to a device.\n",
"\n",
" Args:\n",
" hostname: A string, the hostname or IP address to connect to.\n",
" username: A string, the username to use on the connection.\n",
" password: A string, the password to use on the connection.\n",
" port: An int, the port number to connect to.\n",
" ssh_keys: A tuple of strings, SSH private keys (optional; may be None).\n",
" timeout: A float, the number of seconds before a connection times out.\n",
"\n",
" Returns:\n",
" A paramiko.SSHClient() instance\n",
" \"\"\"\n",
"\n",
" options = SshOptions()\n",
" hostname, port, username = options.Lookup(hostname, port, username)\n",
" ssh_client = None\n",
"\n",
" def RaiseError(e, msg):\n",
" \"\"\"Raises an exception, disconnecting the SSH client.\n",
"\n",
" Args:\n",
" e: An Exception.\n",
" msg: An object, exception arguments.\n",
" \"\"\"\n",
" raise e(msg)\n",
"\n",
" try:\n",
" ssh_client = paramiko.SSHClient()\n",
" # Always auto-add remote SSH host keys.\n",
" ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n",
" ssh_client.load_system_host_keys()\n",
" # Connect using paramiko with a timeout parameter (requires paramiko 1.7)\n",
" if ssh_keys:\n",
" pkeys = []\n",
" for key in ssh_keys:\n",
" logging.debug('Using SSH private key for device authentication.')\n",
" # Use a virtual temporary file to store the key.\n",
" ssh_key_fileobj = cStringIO.StringIO()\n",
" ssh_key_fileobj.write(key)\n",
" ssh_key_fileobj.reset()\n",
" try:\n",
" pkeys.append(paramiko.DSSKey(file_obj=ssh_key_fileobj))\n",
" logging.debug('Using SSH DSA key for %r', hostname)\n",
" except (IndexError, paramiko.SSHException) as e:\n",
" if (isinstance(e, IndexError) or\n",
" 'not a valid DSA private key file' in str(e)):\n",
" ssh_key_fileobj.reset()\n",
" try:\n",
" logging.debug('Using SSH RSA key for %r', hostname)\n",
" pkeys.append(paramiko.RSAKey(file_obj=ssh_key_fileobj))\n",
" except (IndexError, paramiko.SSHException) as e:\n",
" raise exceptions.AuthenticationError(str(e))\n",
" else:\n",
" raise exceptions.ConnectError('SSHException: %s' % str(e))\n",
" else:\n",
" logging.debug('Using password for %r', hostname)\n",
" pkeys = [None]\n",
" for pkey in pkeys:\n",
" saved_exception = None\n",
" try:\n",
" ssh_client.connect(hostname=hostname,\n",
" port=port,\n",
" username=username,\n",
" password=password,\n",
" pkey=pkey,\n",
" timeout=timeout,\n",
" allow_agent=False,\n",
" look_for_keys=False)\n",
" break\n",
" except (paramiko.AuthenticationException, paramiko.SSHException) as e:\n",
" saved_exception = e\n",
" if saved_exception is not None:\n",
" raise saved_exception # pylint: disable=raising-bad-type\n",
" transport = ssh_client.get_transport()\n",
" # Sometimes we have to authenticate a second time, eg. on Force10\n",
" # we always fail the first authentication (if we try pkey + pass,\n",
" # the pass succeeds; but if we do pass only, we have to do it\n",
" # twice). connect() above will have authenticated once.\n",
" if not transport.is_authenticated():\n",
" if pkeys != [None]:\n",
" for pkey in pkeys:\n",
" try:\n",
" transport.auth_publickey(username, pkey)\n",
" break\n",
" except paramiko.SSHException:\n",
" pass\n",
" if not transport.is_authenticated():\n",
" if password is not None:\n",
" try:\n",
" transport.auth_password(username, password)\n",
" except paramiko.SSHException:\n",
" pass\n",
" if not transport.is_authenticated():\n",
" msg = 'Not authenticated after two attempts on %r' % hostname\n",
" RaiseError(exceptions.ConnectError, msg)\n",
" except EOFError:\n",
" msg = 'EOFError connecting to: %r' % hostname\n",
" RaiseError(exceptions.ConnectError, msg)\n",
" except paramiko.AuthenticationException as e:\n",
" msg = 'Authentication error connecting to %s: %s' % (hostname, str(e))\n",
" RaiseError(exceptions.AuthenticationError, msg)\n",
" except paramiko.SSHException as e:\n",
" msg = 'SSHException connecting to %s: %s' % (hostname, str(e))\n",
" RaiseError(exceptions.ConnectError, msg)\n",
" except socket.timeout as e:\n",
" msg = 'Timed-out while connecting to %s: %s' % (hostname, str(e))\n",
" RaiseError(exceptions.ConnectError, msg)\n",
" except socket.error as e:\n",
" msg = 'Socket error connecting to %r: %s %s' % (hostname, e.__class__, e)\n",
" RaiseError(exceptions.ConnectError, msg)\n",
"\n",
" return ssh_client\n",
"\n",
"\n",
"def _ScpRecvResponse(channel):\n",
" \"\"\"Receives a response on a SCP channel.\n",
"\n",
" Args:\n",
" channel: A Paramiko channel object.\n",
"\n",
" Raises:\n",
" ScpClosedError: If the device has closed the connection.\n",
" ScpMajorError: If the device reports a major error.\n",
" ScpMinorError: If the device reports a minor error.\n",
" ScpProtocolError: If an unexpected error occurs.\n",
" ScpTimeoutError: If no response is received within the timeout.\n",
" \"\"\"\n",
" buf = channel.recv(1)\n",
" while True:\n",
" if channel.recv_stderr_ready():\n",
" # Dodgy: Cisco sometimes *ask* for a password, but they don't actually\n",
" err = channel.recv_stderr(512)\n",
" if err == 'Password: ':\n",
" logging.warn('Password prompt received on SCP stderr, assuming '\n",
" 'IOS bug (ignoring)')\n",
" else:\n",
" raise ScpProtocolError('Data on stderr: %r' % err)\n",
"\n",
" if not buf:\n",
" raise ScpClosedError('Connection closed by remote device')\n",
"\n",
" if buf == '\\x00':\n",
" # Code \\x00 indicates success. Brocade have been observed sending\n",
" # \\x00\\x02 followed by an error message, so we need to only read\n",
" # the single \\x00 and leave the error message to be handled in a\n",
" # future call to _ScpRecvResponse.\n",
" return\n",
"\n",
" try:\n",
" extra = channel.recv(512)\n",
" if not extra:\n",
" raise ScpProtocolError(\n",
" 'Connection closed by remote device; partial response: %r' % buf)\n",
" else:\n",
" buf += extra\n",
" except socket.timeout:\n",
" if buf:\n",
" raise ScpProtocolError(\n",
" 'Timed out reading from socket; partial response: %r' % buf)\n",
" else:\n",
" raise ScpTimeoutError('Timed out reading from socket')\n",
"\n",
" if buf[-1] == '\\n':\n",
" if buf[0] == '\\x01':\n",
" if buf.startswith('\\x01File ') and buf.rstrip().endswith(\n",
" 'created successfully.'):\n",
" return\n",
" raise ScpMinorError(buf[1:-1])\n",
" elif buf[0] == '\\x02':\n",
" # Code \\x02: Fatal error.\n",
" raise ScpMajorError(buf[1:-1])\n",
" else:\n",
" # Default case: Fatal error.\n",
" raise ScpMajorError(buf[:-1])\n",
"\n",
"\n",
"def ScpPut(transport, source_data, destination_file, timeout, send_buffer=8192):\n",
" \"\"\"Puts a file via SCP protocol.\n",
"\n",
" Args:\n",
" transport: A Paramiko transport object.\n",
" source_data: The source data to copy as a string.\n",
" destination_file: The file on the remote device.\n",
" timeout: The timeout to use for the SCP channel.\n",
" send_buffer: The number of bytes to send in each operation.\n",
"\n",
" Raises:\n",
" ConnectionError: There was an error trying to start the SCP connection.\n",
" ScpError: There was an error copying the file.\n",
" \"\"\"\n",
" channel = transport.open_session()\n",
" try:\n",
" channel.settimeout(timeout)\n",
" channel.exec_command('scp -t %s' % destination_file)\n",
"\n",
" # Server must acknowledge our connection.\n",
" _ScpRecvResponse(channel)\n",
"\n",
" # Send file attributes, length and a dummy source file basename.\n",
" source_size = len(source_data)\n",
" channel.sendall('C0644 %d 1\\n' % source_size)\n",
"\n",
" # Server must acknowledge our request to send.\n",
" _ScpRecvResponse(channel)\n",
"\n",
" # Send the data in chunks rather than all at once\n",
" pos = 0\n",
" while pos < source_size:\n",
" channel.sendall(source_data[pos:pos + send_buffer])\n",
" pos += send_buffer\n",
"\n",
" # Indicate that we experienced no errors while sending.\n",
" channel.sendall('\\0')\n",
"\n",
" # Get the final status back from the device. Note: Force10 actually sends\n",
" # final status prior to getting the \"all OK\" from us.\n",
" _ScpRecvResponse(channel)\n",
" finally:\n",
" try:\n",
" channel.close()\n",
" except EOFError:\n",
" raise ScpChannelError('Error closing SCP channel')\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.14285714285714285,
0,
0,
0,
0.01818181818181818,
0,
0,
0,
0.013888888888888888,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0.020833333333333332,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0.05,
0.045454545454545456,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.09090909090909091,
0,
0.015151515151515152,
0,
0,
0,
0,
0.01694915254237288,
0.020833333333333332,
0.023255813953488372,
0.0196078431372549,
0,
0.0136986301369863,
0.023809523809523808,
0.015625,
0,
0.06666666666666667,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014492753623188406,
0.014925373134328358,
0.01818181818181818,
0.01639344262295082,
0.045454545454545456,
0,
0.02040816326530612,
0,
0,
0.06666666666666667,
0,
0.034482758620689655,
0,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04,
0.014285714285714285,
0.05,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0.14285714285714285,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0.015151515151515152,
0.016129032258064516,
0,
0.023255813953488372,
0.01639344262295082,
0,
0,
0.015151515151515152,
0.014285714285714285,
0,
0.01694915254237288,
0.0625,
0,
0,
0.01818181818181818,
0.047619047619047616,
0,
0.034482758620689655,
0.09090909090909091,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988,
0,
0,
0.015625,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0.06666666666666667,
0,
0,
0.025,
0,
0,
0.03225806451612903,
0,
0.018518518518518517,
0,
0.06666666666666667,
0,
0.014705882352941176,
0.02127659574468085,
0.05263157894736842,
0,
0,
0.020833333333333332,
0,
0,
0.02702702702702703,
0,
0,
0.03333333333333333,
0,
0,
0.03571428571428571,
0,
0,
0,
0.05,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0.07142857142857142,
0,
0.012987012987012988,
0.02702702702702703,
0.03333333333333333,
0,
0,
0.08333333333333333,
0,
0,
0,
0.015384615384615385,
0,
0,
0.0136986301369863,
0.014084507042253521,
0.014084507042253521,
0.024390243902439025,
0.07692307692307693,
0,
0,
0.03125,
0.05,
0,
0,
0.08333333333333333,
0,
0,
0.07142857142857142,
0,
0,
0.08333333333333333,
0,
0,
0,
0.037037037037037035,
0,
0.02631578947368421,
0.058823529411764705,
0,
0.034482758620689655,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0.012345679012345678,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0.14285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0.04,
0,
0,
0,
0,
0,
0,
0,
0.09090909090909091,
0,
0.045454545454545456,
0,
0.017543859649122806
] | 389 | 0.009938 |
#
# Copyright (c) SAS Institute, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Module for sanity checking group contents model.
"""
import logging
import itertools
from conary import versions
from conary.deps import deps
from updatebot.errors import OldVersionsFoundError
from updatebot.errors import GroupValidationFailedError
from updatebot.errors import NameVersionConflictsFoundError
from updatebot.errors import ExpectedRemovalValidationFailedError
log = logging.getLogger('updatebot.groupmgr')
class GroupSanityChecker(object):
"""
Class for checking group model sanity.
"""
def __init__(self, cfg, helper):
self._cfg = cfg
self._helper = helper
def check(self, groups, errataState):
"""
Validate the contents of the package group to ensure sanity:
1. Check for packages that have the same source name, but
different versions.
2. Check that the version in the group is the latest source/build
of that version.
3. Check that package removals specified in the config file have
occurred.
"""
errors = []
for name, group in groups.iteritems():
log.info('checking consistency of %s' % name)
try:
log.info('checking name version conflict')
self._checkNameVersionConflict(group)
except NameVersionConflictsFoundError, e:
errors.append((group, e))
# FIXME: This is a hack, there should be a better way of controlling
# what policy runs for a particular group.
if 'standard' not in name:
try:
log.info('checking latest versions')
self._checkLatestVersion(group)
except OldVersionsFoundError, e:
errors.append((group, e))
try:
log.info('checking removals')
self._checkRemovals(group, errataState)
except ExpectedRemovalValidationFailedError, e:
errors.append((group, e))
if errors:
raise GroupValidationFailedError(errors=errors)
def _checkNameVersionConflict(self, group):
"""
Check for packages that have the same source name, but different
versions.
"""
# get names and versions
troves = set()
labels = set()
for pkgKey, pkgData in group.iteritems():
name = str(pkgData.name)
version = None
if pkgData.version:
versionObj = versions.ThawVersion(pkgData.version)
labels.add(versionObj.branch().label())
version = str(versionObj.asString())
flavor = None
# FIXME: At some point we might want to add proper flavor handling,
# note that group flavor handling is different than what
# findTroves normally does.
#if pkgData.flavor:
# flavor = deps.ThawFlavor(str(pkgData.flavor))
troves.add((name, version, flavor))
# Get flavors and such.
foundTroves = set([ x for x in
itertools.chain(*self._helper.findTroves(troves,
labels=labels).itervalues()) ])
# get sources for each name version pair
sources = self._helper.getSourceVersions(foundTroves)
seen = {}
for (n, v, f), pkgSet in sources.iteritems():
binVer = list(pkgSet)[0][1]
seen.setdefault(n, set()).add(binVer)
binPkgs = {}
conflicts = {}
for name, vers in seen.iteritems():
if len(vers) > 1:
log.error('found multiple versions of %s' % name)
for binVer in vers:
srcVer = binVer.getSourceVersion()
nvf = (name, srcVer, None)
conflicts.setdefault(name, []).append(srcVer)
binPkgs[nvf] = sources[nvf]
if conflicts:
raise NameVersionConflictsFoundError(groupName=group.groupName,
conflicts=conflicts,
binPkgs=binPkgs)
def _checkLatestVersion(self, group):
"""
Check to make sure each specific conary version is the latest source
and build count of the upstream version.
"""
# get names and versions
troves = set()
labels = set()
for pkgKey, pkgData in group.iteritems():
name = str(pkgData.name)
version = None
if pkgData.version:
version = versions.ThawVersion(pkgData.version)
labels.add(version.branch().label())
# get upstream version
revision = version.trailingRevision()
upstreamVersion = revision.getVersion()
# FIXME: This should probably be a fully formed version
# as above.
version = version.branch().label().asString() + '/' + upstreamVersion
flavor = None
# FIXME: At some point we might want to add proper flavor handling,
# note that group flavor handling is different than what
# findTroves normally does.
#if pkgData.flavor:
# flavor = deps.ThawFlavor(str(pkgData.flavor))
troves.add((name, version, flavor))
# Get flavors and such.
foundTroves = dict([ (x[0], y) for x, y in
self._helper.findTroves(troves, labels=labels).iteritems() ])
pkgs = {}
for pkgKey, pkgData in group.iteritems():
name = str(pkgData.name)
version = None
if pkgData.version:
version = versions.ThawVersion(pkgData.version)
flavor = None
if pkgData.flavor:
flavor = deps.ThawFlavor(str(pkgData.flavor))
pkgs.setdefault(name, []).append((name, version, flavor))
assert len(pkgs) == len(foundTroves)
# Get all old versions so that we can make sure any version conflicts
# were introduced by old version handling.
oldVersions = set()
if self._cfg.platformSearchPath:
qlabels = set(self._cfg.platformSearchPath) | labels
else:
qlabels = labels
for nvfLst in self._cfg.useOldVersion.itervalues():
for nvf in nvfLst:
srcMap = self._helper.getSourceVersionMapFromBinaryVersion(nvf,
labels=qlabels, latest=False)
oldVersions |= set(itertools.chain(*srcMap.itervalues()))
errors = {}
for name, found in foundTroves.iteritems():
assert name in pkgs
# Make sure to dedup packages from the model since a name/version
# pair can occur more than once.
current = sorted(set(pkgs[name]))
# FIXME: HACK to filter found for the versions in current.
# Due to some issues early on with building pkgs with missing
# flavors findTroves is returning some extra cruft.
current_versions = [ currentnvf[1] for currentnvf in current ]
found = [ nvf for nvf in found if nvf[1] in current_versions ]
if len(current) > len(found):
log.warn('found more packages in the model than in the '
'repository, assuming that multiversion policy will '
'catch this.')
continue
assert len(current) == 1 or len(found) == len(current)
foundError = False
for i, (n, v, f) in enumerate(found):
if len(current) == 1:
i = 0
cn, cv, cf = current[i]
assert n == cn
if v != cv:
if (n, v, f) in oldVersions:
log.info('found %s=%s[%s] in oldVersions exceptions'
% (n, v, f))
continue
# This is probably a flavor that we don't care about
# anymore.
if cv > v and cv in [ x[1] for x in found ]:
log.warn('missing flavors found of %s that are not all '
'included in the group, assuming this is '
'intentional.' % cn)
continue
foundError = True
if foundError:
log.error('found old version for %s' % name)
errors[name] = (current, found)
if errors:
raise OldVersionsFoundError(pkgNames=errors.keys(), errors=errors)
def _checkRemovals(self, group, updateId):
"""
Check to make sure that all configured package removals have happened.
"""
# get package removals from the config object.
removePackages = self._cfg.updateRemovesPackages.get(updateId, [])
removeObsoleted = self._cfg.removeObsoleted.get(updateId, [])
removeSource = [ x[0] for x in
self._cfg.removeSource.get(updateId, []) ]
# get names and versions
troves = set()
labels = set()
for pkgKey, pkgData in group.iteritems():
name = str(pkgData.name)
version = None
if pkgData.version:
versionObj = versions.ThawVersion(pkgData.version)
labels.add(versionObj.branch().label())
version = str(versionObj.asString())
flavor = None
troves.add((name, version, flavor))
# Get flavors and such.
foundTroves = set([ x for x in
itertools.chain(*self._helper.findTroves(troves,
labels=labels).itervalues()) ])
# get sources for each name version pair
sources = self._helper.getSourceVersions(foundTroves)
# collapse to sourceName: [ binNames, ] dictionary
sourceNameMap = dict([ (x[0].split(':')[0], [ z[0] for z in y ])
for x, y in sources.iteritems() ])
binRemovals = set(itertools.chain(*[ sourceNameMap[x]
for x in removeSource
if x in sourceNameMap ]))
# take the union
removals = set(removePackages) | set(removeObsoleted) | binRemovals
errors = []
# Make sure these packages are not in the group model.
for pkgKey, pkgData in group.iteritems():
if pkgData.name in removals:
errors.append(pkgData.name)
if errors:
log.info('found packages that should be removed %s' % errors)
raise ExpectedRemovalValidationFailedError(updateId=updateId,
pkgNames=errors)
| [
"#\n",
"# Copyright (c) SAS Institute, Inc.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"#\n",
"\n",
"\n",
"\"\"\"\n",
"Module for sanity checking group contents model.\n",
"\"\"\"\n",
"\n",
"import logging\n",
"import itertools\n",
"\n",
"from conary import versions\n",
"from conary.deps import deps\n",
"\n",
"from updatebot.errors import OldVersionsFoundError\n",
"from updatebot.errors import GroupValidationFailedError\n",
"from updatebot.errors import NameVersionConflictsFoundError\n",
"from updatebot.errors import ExpectedRemovalValidationFailedError\n",
"\n",
"log = logging.getLogger('updatebot.groupmgr')\n",
"\n",
"class GroupSanityChecker(object):\n",
" \"\"\"\n",
" Class for checking group model sanity.\n",
" \"\"\"\n",
"\n",
" def __init__(self, cfg, helper):\n",
" self._cfg = cfg\n",
" self._helper = helper\n",
"\n",
" def check(self, groups, errataState):\n",
" \"\"\"\n",
" Validate the contents of the package group to ensure sanity:\n",
" 1. Check for packages that have the same source name, but\n",
" different versions.\n",
" 2. Check that the version in the group is the latest source/build\n",
" of that version.\n",
" 3. Check that package removals specified in the config file have\n",
" occured.\n",
" \"\"\"\n",
"\n",
" errors = []\n",
" for name, group in groups.iteritems():\n",
" log.info('checking consistency of %s' % name)\n",
" try:\n",
" log.info('checking name version conflict')\n",
" self._checkNameVersionConflict(group)\n",
" except NameVersionConflictsFoundError, e:\n",
" errors.append((group, e))\n",
"\n",
" # FIXME: This is a hack, there should be a better way of controlling\n",
" # what policy runs for a particular group.\n",
" if 'standard' not in name:\n",
" try:\n",
" log.info('checking latest versions')\n",
" self._checkLatestVersion(group)\n",
" except OldVersionsFoundError, e:\n",
" errors.append((group, e))\n",
"\n",
" try:\n",
" log.info('checking removals')\n",
" self._checkRemovals(group, errataState)\n",
" except ExpectedRemovalValidationFailedError, e:\n",
" errors.append((group, e))\n",
"\n",
" if errors:\n",
" raise GroupValidationFailedError(errors=errors)\n",
"\n",
" def _checkNameVersionConflict(self, group):\n",
" \"\"\"\n",
" Check for packages taht have the same source name, but different\n",
" versions.\n",
" \"\"\"\n",
"\n",
" # get names and versions\n",
" troves = set()\n",
" labels = set()\n",
" for pkgKey, pkgData in group.iteritems():\n",
" name = str(pkgData.name)\n",
"\n",
" version = None\n",
" if pkgData.version:\n",
" versionObj = versions.ThawVersion(pkgData.version)\n",
" labels.add(versionObj.branch().label())\n",
" version = str(versionObj.asString())\n",
"\n",
" flavor = None\n",
" # FIXME: At some point we might want to add proper flavor handling,\n",
" # note that group flavor handling is different than what\n",
" # findTroves normally does.\n",
" #if pkgData.flavor:\n",
" # flavor = deps.ThawFlavor(str(pkgData.flavor))\n",
"\n",
" troves.add((name, version, flavor))\n",
"\n",
" # Get flavors and such.\n",
" foundTroves = set([ x for x in\n",
" itertools.chain(*self._helper.findTroves(troves,\n",
" labels=labels).itervalues()) ])\n",
"\n",
" # get sources for each name version pair\n",
" sources = self._helper.getSourceVersions(foundTroves)\n",
"\n",
" seen = {}\n",
" for (n, v, f), pkgSet in sources.iteritems():\n",
" binVer = list(pkgSet)[0][1]\n",
" seen.setdefault(n, set()).add(binVer)\n",
"\n",
" binPkgs = {}\n",
" conflicts = {}\n",
" for name, vers in seen.iteritems():\n",
" if len(vers) > 1:\n",
" log.error('found multiple versions of %s' % name)\n",
" for binVer in vers:\n",
" srcVer = binVer.getSourceVersion()\n",
" nvf = (name, srcVer, None)\n",
" conflicts.setdefault(name, []).append(srcVer)\n",
" binPkgs[nvf] = sources[nvf]\n",
"\n",
" if conflicts:\n",
" raise NameVersionConflictsFoundError(groupName=group.groupName,\n",
" conflicts=conflicts,\n",
" binPkgs=binPkgs)\n",
"\n",
" def _checkLatestVersion(self, group):\n",
" \"\"\"\n",
" Check to make sure each specific conary version is the latest source\n",
" and build count of the upstream version.\n",
" \"\"\"\n",
"\n",
" # get names and versions\n",
" troves = set()\n",
" labels = set()\n",
" for pkgKey, pkgData in group.iteritems():\n",
" name = str(pkgData.name)\n",
"\n",
" version = None\n",
" if pkgData.version:\n",
" version = versions.ThawVersion(pkgData.version)\n",
" labels.add(version.branch().label())\n",
" # get upstream version\n",
" revision = version.trailingRevision()\n",
" upstreamVersion = revision.getVersion()\n",
"\n",
" # FIXME: This should probably be a fully formed version\n",
" # as above.\n",
" version = version.branch().label().asString() + '/' + upstreamVersion\n",
"\n",
" flavor = None\n",
" # FIXME: At some point we might want to add proper flavor handling,\n",
" # note that group flavor handling is different than what\n",
" # findTroves normally does.\n",
" #if pkgData.flavor:\n",
" # flavor = deps.ThawFlavor(str(pkgData.flavor))\n",
"\n",
" troves.add((name, version, flavor))\n",
"\n",
" # Get flavors and such.\n",
" foundTroves = dict([ (x[0], y) for x, y in\n",
" self._helper.findTroves(troves, labels=labels).iteritems() ])\n",
"\n",
" pkgs = {}\n",
" for pkgKey, pkgData in group.iteritems():\n",
" name = str(pkgData.name)\n",
" version = None\n",
" if pkgData.version:\n",
" version = versions.ThawVersion(pkgData.version)\n",
" flavor = None\n",
" if pkgData.flavor:\n",
" flavor = deps.ThawFlavor(str(pkgData.flavor))\n",
"\n",
" pkgs.setdefault(name, []).append((name, version, flavor))\n",
"\n",
" assert len(pkgs) == len(foundTroves)\n",
"\n",
" # Get all old versions so that we can make sure any version conflicts\n",
" # were introduced by old version handling.\n",
" oldVersions = set()\n",
" if self._cfg.platformSearchPath:\n",
" qlabels = set(self._cfg.platformSearchPath) | labels\n",
" else:\n",
" qlabels = labels\n",
" for nvfLst in self._cfg.useOldVersion.itervalues():\n",
" for nvf in nvfLst:\n",
" srcMap = self._helper.getSourceVersionMapFromBinaryVersion(nvf,\n",
" labels=qlabels, latest=False)\n",
" oldVersions |= set(itertools.chain(*srcMap.itervalues()))\n",
"\n",
" errors = {}\n",
" for name, found in foundTroves.iteritems():\n",
" assert name in pkgs\n",
" # Make sure to dedup packages from the model since a name/version\n",
" # pair can occure more than once.\n",
" current = sorted(set(pkgs[name]))\n",
"\n",
" # FIXME: HACK to filter found for the versions in current.\n",
" # Do to some issues early on with building pkgs with missing\n",
" # flavors findTroves is returning some extra cruft.\n",
" current_versions = [ currentnvf[1] for currentnvf in current ]\n",
" found = [ nvf for nvf in found if nvf[1] in current_versions ]\n",
" \n",
" if len(current) > len(found):\n",
" log.warn('found more packages in the model than in the '\n",
" 'repository, assuming that multiversion policy will '\n",
" 'catch this.')\n",
" continue\n",
"\n",
" assert len(current) == 1 or len(found) == len(current)\n",
"\n",
" foundError = False\n",
" for i, (n, v, f) in enumerate(found):\n",
" if len(current) == 1:\n",
" i = 0\n",
" cn, cv, cf = current[i]\n",
" assert n == cn\n",
"\n",
" if v != cv:\n",
" if (n, v, f) in oldVersions:\n",
" log.info('found %s=%s[%s] in oldVersions exceptions'\n",
" % (n, v, f))\n",
" continue\n",
"\n",
" # This is probably a flavor that we don't care about\n",
" # anymore.\n",
" if cv > v and cv in [ x[1] for x in found ]:\n",
" log.warn('missing flavors found of %s that are not all '\n",
" 'included in the group, assuming this '\n",
" 'intentional.' % cn)\n",
" continue\n",
"\n",
" foundError = True\n",
"\n",
" if foundError:\n",
" log.error('found old version for %s' % name)\n",
" errors[name] = (current, found)\n",
"\n",
" if errors:\n",
" raise OldVersionsFoundError(pkgNames=errors.keys(), errors=errors)\n",
"\n",
" def _checkRemovals(self, group, updateId):\n",
" \"\"\"\n",
" Check to make sure that all configured package removals have happened.\n",
" \"\"\"\n",
"\n",
" # get package removals from the config object.\n",
" removePackages = self._cfg.updateRemovesPackages.get(updateId, [])\n",
" removeObsoleted = self._cfg.removeObsoleted.get(updateId, [])\n",
" removeSource = [ x[0] for x in\n",
" self._cfg.removeSource.get(updateId, []) ]\n",
"\n",
" # get names and versions\n",
" troves = set()\n",
" labels = set()\n",
" for pkgKey, pkgData in group.iteritems():\n",
" name = str(pkgData.name)\n",
"\n",
" version = None\n",
" if pkgData.version:\n",
" versionObj = versions.ThawVersion(pkgData.version)\n",
" labels.add(versionObj.branch().label())\n",
" version = str(versionObj.asString())\n",
"\n",
" flavor = None\n",
" troves.add((name, version, flavor))\n",
"\n",
" # Get flavors and such.\n",
" foundTroves = set([ x for x in\n",
" itertools.chain(*self._helper.findTroves(troves,\n",
" labels=labels).itervalues()) ])\n",
"\n",
" # get sources for each name version pair\n",
" sources = self._helper.getSourceVersions(foundTroves)\n",
"\n",
" # collapse to sourceName: [ binNames, ] dictionary\n",
" sourceNameMap = dict([ (x[0].split(':')[0], [ z[0] for z in y ])\n",
" for x, y in sources.iteritems() ])\n",
"\n",
" binRemovals = set(itertools.chain(*[ sourceNameMap[x]\n",
" for x in removeSource\n",
" if x in sourceNameMap ]))\n",
"\n",
" # take the union\n",
" removals = set(removePackages) | set(removeObsoleted) | binRemovals\n",
"\n",
" errors = []\n",
" # Make sure these packages are not in the group model.\n",
" for pkgKey, pkgData in group.iteritems():\n",
" if pkgData.name in removals:\n",
" errors.append(pkgData.name)\n",
"\n",
" if errors:\n",
" log.info('found packages that should be removed %s' % errors)\n",
" raise ExpectedRemovalValidationFailedError(updateId=updateId,\n",
" pkgNames=errors)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0.02564102564102564,
0.01639344262295082,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0.0196078431372549,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018518518518518517,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02666666666666667,
0.02666666666666667,
0.07692307692307693,
0,
0,
0.013513513513513514,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03076923076923077,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0.014705882352941176,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0.01639344262295082,
0.025,
0,
0,
0,
0,
0,
0.0410958904109589,
0.015151515151515152,
0,
0.016129032258064516,
0,
0.014084507042253521,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 307 | 0.002141 |
'''
Segment_tree creates a segment tree with a given array and function,
allowing queries to be done later in log(N) time
function takes 2 values and returns a same type value
'''
class SegmentTree:
def __init__(self,arr,function):
self.segment = [0 for x in range(3*len(arr)+3)]
self.arr = arr
self.fn = function
self.make_tree(0,0,len(arr)-1)
def make_tree(self,i,l,r):
if l==r:
self.segment[i] = self.arr[l]
elif l<r:
self.make_tree(2*i+1,l,int((l+r)/2))
self.make_tree(2*i+2,int((l+r)/2)+1,r)
self.segment[i] = self.fn(self.segment[2*i+1],self.segment[2*i+2])
def __query(self,i,L,R,l,r):
if l>R or r<L or L>R or l>r:
return None
if L>=l and R<=r:
return self.segment[i]
val1 = self.__query(2*i+1,L,int((L+R)/2),l,r)
val2 = self.__query(2*i+2,int((L+R+2)/2),R,l,r)
print(L,R," returned ",val1,val2)
if val1 != None:
if val2 != None:
return self.fn(val1,val2)
return val1
return val2
def query(self,L,R):
return self.__query(0,0,len(self.arr)-1,L,R)
'''
Example -
mytree = SegmentTree([2,4,5,3,4],max)
mytree.query(2,4)
mytree.query(0,3) ...
mytree = SegmentTree([4,5,2,3,4,43,3],sum)
mytree.query(1,8)
...
'''
| [
"'''\n",
"Segment_tree creates a segment tree with a given array and function,\n",
"allowing queries to be done later in log(N) time\n",
"function takes 2 values and returns a same type value\n",
"'''\n",
"class SegmentTree:\n",
" def __init__(self,arr,function):\n",
" self.segment = [0 for x in range(3*len(arr)+3)]\n",
" self.arr = arr\n",
" self.fn = function\n",
" self.maketree(0,0,len(arr)-1)\n",
"\n",
" def make_tree(self,i,l,r):\n",
" if l==r:\n",
" self.segment[i] = self.arr[l]\n",
" elif l<r:\n",
" self.make_tree(2*i+1,l,int((l+r)/2))\n",
" self.make_tree(2*i+2,int((l+r)/2)+1,r)\n",
" self.segment[i] = self.fn(self.segment[2*i+1],self.segment[2*i+2])\n",
"\n",
" def __query(self,i,L,R,l,r):\n",
" if l>R or r<L or L>R or l>r:\n",
" return None\n",
" if L>=l and R<=r:\n",
" return self.segment[i]\n",
" val1 = self.__query(2*i+1,L,int((L+R)/2),l,r)\n",
" val2 = self.__query(2*i+2,int((L+R+2)/2),R,l,r)\n",
" print(L,R,\" returned \",val1,val2)\n",
" if val1 != None:\n",
" if val2 != None:\n",
" return self.fn(val1,val2)\n",
" return val1\n",
" return val2\n",
" \n",
"\n",
" def query(self,L,R):\n",
" return self.__query(0,0,len(self.arr)-1,L,R)\n",
"\n",
"'''\n",
"Example -\n",
"mytree = SegmentTree([2,4,5,3,4],max)\n",
"mytree.query(2,4)\n",
"mytree.query(0,3) ...\n",
"\n",
"mytree = SegmentTree([4,5,2,3,4,43,3],sum)\n",
"mytree.query(1,8)\n",
"...\n",
"\n",
"'''\n"
] | [
0,
0,
0,
0,
0,
0.05263157894736842,
0.05405405405405406,
0,
0,
0,
0.05263157894736842,
0,
0.12903225806451613,
0.11764705882352941,
0,
0.05555555555555555,
0.04081632653061224,
0.0392156862745098,
0.012658227848101266,
0,
0.18181818181818182,
0.10810810810810811,
0,
0.07692307692307693,
0,
0.07407407407407407,
0.07142857142857142,
0.09523809523809523,
0.04,
0.034482758620689655,
0.023809523809523808,
0,
0,
0.1111111111111111,
0,
0.12,
0.07547169811320754,
0,
0.25,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 49 | 0.037076 |
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare command to invoke the same page in two versions of a browser.
Does the easiest compatibility test: equality comparison between two different
versions of the same browser. Invoked with a series of command line options
that specify which URLs to check, which browser to use, where to store results,
etc.
"""
import os # Functions for walking the directory tree
import tempfile # Get a temporary directory to hold intermediates
import command_line
import drivers # Functions for driving keyboard/mouse/windows, OS-specific
import operators # Functions that, given two bitmaps as input, produce
# output depending on the performance of an operation
import scrapers # Functions that know how to capture a render from
# particular browsers
def CreateCommand(cmdline):
"""Inserts the command and arguments into a command line for parsing."""
cmd = cmdline.AddCommand(
["compare2"],
"Compares the output of two browsers on the same URL or list of URLs",
ValidateCompare2,
ExecuteCompare2)
cmd.AddArgument(
["-b1", "--browser1"], "Full path to first browser's executable",
type="readfile", metaname="PATH", required=True)
cmd.AddArgument(
["-b2", "--browser2"], "Full path to second browser's executable",
type="readfile", metaname="PATH", required=True)
cmd.AddArgument(
["-b", "--browser"], "Which browser to use", type="string",
default="chrome")
cmd.AddArgument(
["-b1v", "--browser1ver"], "Version of first browser", metaname="VERSION")
cmd.AddArgument(
["-b2v", "--browser2ver"], "Version of second browser", metaname="VERSION")
cmd.AddArgument(
["-b1n", "--browser1name"], "Optional name for first browser (used in "
"directory to hold intermediate files)", metaname="NAME")
cmd.AddArgument(
["-b2n", "--browser2name"], "Optional name for second browser (used in "
"directory to hold intermediate files)", metaname="NAME")
cmd.AddArgument(
["-o", "--outdir"], "Directory to store scrape files", metaname="DIR")
cmd.AddArgument(
["-u", "--url"], "URL to compare")
cmd.AddArgument(
["-l", "--list"], "List of URLs to compare", type="readfile")
cmd.AddMutualExclusion(["--url", "--list"])
cmd.AddArgument(
["-s", "--startline"], "First line of URL list", type="int")
cmd.AddArgument(
["-e", "--endline"], "Last line of URL list (exclusive)", type="int")
cmd.AddArgument(
["-c", "--count"], "Number of lines of URL file to use", type="int")
cmd.AddDependency("--startline", "--list")
cmd.AddRequiredGroup(["--url", "--list"])
cmd.AddDependency("--endline", "--list")
cmd.AddDependency("--count", "--list")
cmd.AddMutualExclusion(["--count", "--endline"])
cmd.AddDependency("--count", "--startline")
cmd.AddArgument(
["-t", "--timeout"], "Amount of time (seconds) to wait for browser to "
"finish loading",
type="int", default=60)
cmd.AddArgument(
["-log", "--logfile"], "File to write output", type="string", required=True)
cmd.AddArgument(
["-sz", "--size"], "Browser window size", default=(800, 600), type="coords")
cmd.AddArgument(
["-m", "--maskdir"], "Path that holds masks to use for comparison")
cmd.AddArgument(
["-d", "--diffdir"], "Path to hold the difference of comparisons that fail")
def ValidateCompare2(command):
"""Validate the arguments to compare2. Raises ParseError if failed."""
executables = [".exe", ".com", ".bat"]
if (os.path.splitext(command["--browser1"])[1].lower() not in executables or
os.path.splitext(command["--browser2"])[1].lower() not in executables):
raise command_line.ParseError("Browser filename must be an executable")
def ExecuteCompare2(command):
"""Executes the Compare2 command."""
if command["--url"]:
url_list = [command["--url"]]
else:
startline = command["--startline"]
if command["--count"]:
endline = startline+command["--count"]
else:
endline = command["--endline"]
url_list = [url.strip() for url in
open(command["--list"], "r").readlines()[startline:endline]]
log_file = open(command["--logfile"], "w")
outdir = command["--outdir"]
if not outdir: outdir = tempfile.gettempdir()
scrape_info_list = []
class ScrapeInfo(object):
"""Helper class to hold information about a scrape."""
__slots__ = ["browser_path", "scraper", "outdir", "result"]
for index in xrange(1, 3):
scrape_info = ScrapeInfo()
scrape_info.browser_path = command["--browser%d" % index]
scrape_info.scraper = scrapers.GetScraper(
(command["--browser"], command["--browser%dver" % index]))
if command["--browser%dname" % index]:
scrape_info.outdir = os.path.join(outdir,
command["--browser%dname" % index])
else:
scrape_info.outdir = os.path.join(outdir, str(index))
drivers.windowing.PreparePath(scrape_info.outdir)
scrape_info_list.append(scrape_info)
compare = operators.GetOperator("equals_with_mask")
for url in url_list:
success = True
for scrape_info in scrape_info_list:
scrape_info.result = scrape_info.scraper.Scrape(
[url], scrape_info.outdir, command["--size"], (0, 0),
command["--timeout"], path=scrape_info.browser_path)
if not scrape_info.result:
scrape_info.result = "success"
else:
success = False
result = "unknown"
if success:
result = "equal"
file1 = drivers.windowing.URLtoFilename(
url, scrape_info_list[0].outdir, ".bmp")
file2 = drivers.windowing.URLtoFilename(
url, scrape_info_list[1].outdir, ".bmp")
comparison_result = compare.Compare(file1, file2,
maskdir=command["--maskdir"])
if comparison_result is not None:
result = "not-equal"
if command["--diffdir"]:
comparison_result[1].save(
drivers.windowing.URLtoFilename(url, command["--diffdir"], ".bmp"))
# TODO(jhaas): maybe use the logging module rather than raw file writes
log_file.write("%s %s %s %s\n" % (url,
scrape_info_list[0].result,
scrape_info_list[1].result,
result))
| [
"# Copyright (c) 2011 The Chromium Authors. All rights reserved.\n",
"# Use of this source code is governed by a BSD-style license that can be\n",
"# found in the LICENSE file.\n",
"\n",
"\"\"\"SiteCompare command to invoke the same page in two versions of a browser.\n",
"\n",
"Does the easiest compatibility test: equality comparison between two different\n",
"versions of the same browser. Invoked with a series of command line options\n",
"that specify which URLs to check, which browser to use, where to store results,\n",
"etc.\n",
"\"\"\"\n",
"\n",
"import os # Functions for walking the directory tree\n",
"import tempfile # Get a temporary directory to hold intermediates\n",
"\n",
"import command_line\n",
"import drivers # Functions for driving keyboard/mouse/windows, OS-specific\n",
"import operators # Functions that, given two bitmaps as input, produce\n",
" # output depending on the performance of an operation\n",
"import scrapers # Functions that know how to capture a render from\n",
" # particular browsers\n",
"\n",
"\n",
"def CreateCommand(cmdline):\n",
" \"\"\"Inserts the command and arguments into a command line for parsing.\"\"\"\n",
" cmd = cmdline.AddCommand(\n",
" [\"compare2\"],\n",
" \"Compares the output of two browsers on the same URL or list of URLs\",\n",
" ValidateCompare2,\n",
" ExecuteCompare2)\n",
"\n",
" cmd.AddArgument(\n",
" [\"-b1\", \"--browser1\"], \"Full path to first browser's executable\",\n",
" type=\"readfile\", metaname=\"PATH\", required=True)\n",
" cmd.AddArgument(\n",
" [\"-b2\", \"--browser2\"], \"Full path to second browser's executable\",\n",
" type=\"readfile\", metaname=\"PATH\", required=True)\n",
" cmd.AddArgument(\n",
" [\"-b\", \"--browser\"], \"Which browser to use\", type=\"string\",\n",
" default=\"chrome\")\n",
" cmd.AddArgument(\n",
" [\"-b1v\", \"--browser1ver\"], \"Version of first browser\", metaname=\"VERSION\")\n",
" cmd.AddArgument(\n",
" [\"-b2v\", \"--browser2ver\"], \"Version of second browser\", metaname=\"VERSION\")\n",
" cmd.AddArgument(\n",
" [\"-b1n\", \"--browser1name\"], \"Optional name for first browser (used in \"\n",
" \"directory to hold intermediate files)\", metaname=\"NAME\")\n",
" cmd.AddArgument(\n",
" [\"-b2n\", \"--browser2name\"], \"Optional name for second browser (used in \"\n",
" \"directory to hold intermediate files)\", metaname=\"NAME\")\n",
" cmd.AddArgument(\n",
" [\"-o\", \"--outdir\"], \"Directory to store scrape files\", metaname=\"DIR\")\n",
" cmd.AddArgument(\n",
" [\"-u\", \"--url\"], \"URL to compare\")\n",
" cmd.AddArgument(\n",
" [\"-l\", \"--list\"], \"List of URLs to compare\", type=\"readfile\")\n",
" cmd.AddMutualExclusion([\"--url\", \"--list\"])\n",
" cmd.AddArgument(\n",
" [\"-s\", \"--startline\"], \"First line of URL list\", type=\"int\")\n",
" cmd.AddArgument(\n",
" [\"-e\", \"--endline\"], \"Last line of URL list (exclusive)\", type=\"int\")\n",
" cmd.AddArgument(\n",
" [\"-c\", \"--count\"], \"Number of lines of URL file to use\", type=\"int\")\n",
" cmd.AddDependency(\"--startline\", \"--list\")\n",
" cmd.AddRequiredGroup([\"--url\", \"--list\"])\n",
" cmd.AddDependency(\"--endline\", \"--list\")\n",
" cmd.AddDependency(\"--count\", \"--list\")\n",
" cmd.AddMutualExclusion([\"--count\", \"--endline\"])\n",
" cmd.AddDependency(\"--count\", \"--startline\")\n",
" cmd.AddArgument(\n",
" [\"-t\", \"--timeout\"], \"Amount of time (seconds) to wait for browser to \"\n",
" \"finish loading\",\n",
" type=\"int\", default=60)\n",
" cmd.AddArgument(\n",
" [\"-log\", \"--logfile\"], \"File to write output\", type=\"string\", required=True)\n",
" cmd.AddArgument(\n",
" [\"-sz\", \"--size\"], \"Browser window size\", default=(800, 600), type=\"coords\")\n",
" cmd.AddArgument(\n",
" [\"-m\", \"--maskdir\"], \"Path that holds masks to use for comparison\")\n",
" cmd.AddArgument(\n",
" [\"-d\", \"--diffdir\"], \"Path to hold the difference of comparisons that fail\")\n",
"\n",
"\n",
"def ValidateCompare2(command):\n",
" \"\"\"Validate the arguments to compare2. Raises ParseError if failed.\"\"\"\n",
" executables = [\".exe\", \".com\", \".bat\"]\n",
" if (os.path.splitext(command[\"--browser1\"])[1].lower() not in executables or\n",
" os.path.splitext(command[\"--browser2\"])[1].lower() not in executables):\n",
" raise command_line.ParseError(\"Browser filename must be an executable\")\n",
"\n",
"\n",
"def ExecuteCompare2(command):\n",
" \"\"\"Executes the Compare2 command.\"\"\"\n",
" if command[\"--url\"]:\n",
" url_list = [command[\"--url\"]]\n",
" else:\n",
" startline = command[\"--startline\"]\n",
" if command[\"--count\"]:\n",
" endline = startline+command[\"--count\"]\n",
" else:\n",
" endline = command[\"--endline\"]\n",
" url_list = [url.strip() for url in\n",
" open(command[\"--list\"], \"r\").readlines()[startline:endline]]\n",
"\n",
" log_file = open(command[\"--logfile\"], \"w\")\n",
"\n",
" outdir = command[\"--outdir\"]\n",
" if not outdir: outdir = tempfile.gettempdir()\n",
"\n",
" scrape_info_list = []\n",
"\n",
" class ScrapeInfo(object):\n",
" \"\"\"Helper class to hold information about a scrape.\"\"\"\n",
" __slots__ = [\"browser_path\", \"scraper\", \"outdir\", \"result\"]\n",
"\n",
" for index in xrange(1, 3):\n",
" scrape_info = ScrapeInfo()\n",
" scrape_info.browser_path = command[\"--browser%d\" % index]\n",
" scrape_info.scraper = scrapers.GetScraper(\n",
" (command[\"--browser\"], command[\"--browser%dver\" % index]))\n",
"\n",
" if command[\"--browser%dname\" % index]:\n",
" scrape_info.outdir = os.path.join(outdir,\n",
" command[\"--browser%dname\" % index])\n",
" else:\n",
" scrape_info.outdir = os.path.join(outdir, str(index))\n",
"\n",
" drivers.windowing.PreparePath(scrape_info.outdir)\n",
" scrape_info_list.append(scrape_info)\n",
"\n",
" compare = operators.GetOperator(\"equals_with_mask\")\n",
"\n",
" for url in url_list:\n",
" success = True\n",
"\n",
" for scrape_info in scrape_info_list:\n",
" scrape_info.result = scrape_info.scraper.Scrape(\n",
" [url], scrape_info.outdir, command[\"--size\"], (0, 0),\n",
" command[\"--timeout\"], path=scrape_info.browser_path)\n",
"\n",
" if not scrape_info.result:\n",
" scrape_info.result = \"success\"\n",
" else:\n",
" success = False\n",
"\n",
" result = \"unknown\"\n",
"\n",
" if success:\n",
" result = \"equal\"\n",
"\n",
" file1 = drivers.windowing.URLtoFilename(\n",
" url, scrape_info_list[0].outdir, \".bmp\")\n",
" file2 = drivers.windowing.URLtoFilename(\n",
" url, scrape_info_list[1].outdir, \".bmp\")\n",
"\n",
" comparison_result = compare.Compare(file1, file2,\n",
" maskdir=command[\"--maskdir\"])\n",
"\n",
" if comparison_result is not None:\n",
" result = \"not-equal\"\n",
"\n",
" if command[\"--diffdir\"]:\n",
" comparison_result[1].save(\n",
" drivers.windowing.URLtoFilename(url, command[\"--diffdir\"], \".bmp\"))\n",
"\n",
" # TODO(jhaas): maybe use the logging module rather than raw file writes\n",
" log_file.write(\"%s %s %s %s\\n\" % (url,\n",
" scrape_info_list[0].result,\n",
" scrape_info_list[1].result,\n",
" result))\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0.02666666666666667,
0,
0.046511627906976744,
0,
0,
0,
0.013333333333333334,
0.03571428571428571,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0.05263157894736842,
0,
0,
0.05263157894736842,
0,
0,
0.05263157894736842,
0,
0.05263157894736842,
0,
0.05263157894736842,
0,
0,
0.05263157894736842,
0,
0,
0.05263157894736842,
0,
0.05263157894736842,
0,
0.05263157894736842,
0,
0.021739130434782608,
0.05263157894736842,
0,
0.05263157894736842,
0,
0.05263157894736842,
0,
0.022222222222222223,
0.022727272727272728,
0.023255813953488372,
0.024390243902439025,
0.0196078431372549,
0.021739130434782608,
0.05263157894736842,
0,
0,
0,
0.05263157894736842,
0.012345679012345678,
0.05263157894736842,
0.012345679012345678,
0.05263157894736842,
0,
0.05263157894736842,
0.012345679012345678,
0,
0,
0,
0.0136986301369863,
0.024390243902439025,
0.012658227848101266,
0.01282051282051282,
0,
0,
0,
0,
0.02564102564102564,
0.043478260869565216,
0,
0.125,
0,
0,
0.022222222222222223,
0,
0.02702702702702703,
0,
0,
0,
0.022222222222222223,
0,
0.03225806451612903,
0.041666666666666664,
0,
0.041666666666666664,
0,
0.03571428571428571,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0.016666666666666666,
0,
0,
0,
0,
0.018518518518518517,
0,
0.043478260869565216,
0,
0,
0,
0.01818181818181818,
0,
0,
0,
0.030303030303030304,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0.02127659574468085,
0,
0.02127659574468085,
0,
0,
0.017857142857142856,
0,
0,
0.025,
0,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0
] | 170 | 0.012805 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from bomlib.columns import ColumnList
from bomlib.preferences import BomPref
import bomlib.units as units
from bomlib.sort import natural_sort
import re
import sys
DNF = [
"dnf",
"dnl",
"dnp",
"do not fit",
"do not place",
"do not load",
"nofit",
"nostuff",
"noplace",
"noload",
"not fitted",
"not loaded",
"not placed",
"no stuff",
]
class Component():
"""Class for a component, aka 'comp' in the xml netlist file.
This component class is implemented by wrapping an xmlElement instance
with accessors. The xmlElement is held in field 'element'.
"""
def __init__(self, xml_element, prefs=None):
self.element = xml_element
self.libpart = None
if not prefs:
prefs = BomPref()
self.prefs = prefs
# Set to true when this component is included in a component group
self.grouped = False
# Compare the value of this part, to the value of another part (see if they match)
def compareValue(self, other):
# Simple string comparison
if self.getValue().lower() == other.getValue().lower():
return True
# Otherwise, perform a more complicated value comparison
if units.compareValues(self.getValue(), other.getValue()):
return True
# Ignore value if both components are connectors
if self.prefs.groupConnectors:
if 'connector' in self.getLibName().lower() and 'connector' in other.getLibName().lower():
return True
# No match, return False
return False
# Determine if two parts have the same name
def comparePartName(self, other):
pn1 = self.getPartName().lower()
pn2 = other.getPartName().lower()
# Simple direct match
if pn1 == pn2:
return True
# Compare part aliases e.g. "c" to "c_small"
for alias in self.prefs.aliases:
if pn1 in alias and pn2 in alias:
return True
return False
def compareField(self, other, field):
this_field = self.getField(field).lower()
other_field = other.getField(field).lower()
# If blank comparisons are allowed
if this_field == "" or other_field == "":
if not self.prefs.mergeBlankFields:
return False
if this_field == other_field:
return True
return False
def __eq__(self, other):
"""
Equivalency operator is used to determine if two parts are 'equal'
"""
# 'fitted' value must be the same for both parts
if self.isFitted() != other.isFitted():
return False
if len(self.prefs.groups) == 0:
return False
for c in self.prefs.groups:
# Perform special matches
if c.lower() == ColumnList.COL_VALUE.lower():
if not self.compareValue(other):
return False
# Match part name
elif c.lower() == ColumnList.COL_PART.lower():
if not self.comparePartName(other):
return False
# Generic match
elif not self.compareField(other, c):
return False
return True
def setLibPart(self, part):
self.libpart = part
def getPrefix(self):
"""
Get the reference prefix
e.g. if this component has a reference U12, will return "U"
"""
prefix = ""
for c in self.getRef():
if c.isalpha():
prefix += c
else:
break
return prefix
def getSuffix(self):
"""
Return the reference suffix #
e.g. if this component has a reference U12, will return "12"
"""
suffix = ""
for c in self.getRef():
if c.isalpha():
suffix = ""
else:
suffix += c
return int(suffix)
def getLibPart(self):
return self.libpart
def getPartName(self):
return self.element.get("libsource", "part")
def getLibName(self):
return self.element.get("libsource", "lib")
def getDescription(self):
try:
return self.element.get("libsource", "description")
except:
# Compatibility with old KiCad versions (4.x)
ret = self.element.get("field", "name", "description")
if ret == "":
ret = self.libpart.getDescription()
return ret
def setValue(self, value):
"""Set the value of this component"""
v = self.element.getChild("value")
if v:
v.setChars(value)
def getValue(self):
return self.element.get("value")
def getField(self, name, ignoreCase=True, libraryToo=True):
"""Return the value of a field named name. The component is first
checked for the field, and then the components library part is checked
for the field. If the field doesn't exist in either, an empty string is
returned
Keywords:
name -- The name of the field to return the value for
libraryToo -- look in the libpart's fields for the same name if not found
in component itself
"""
fp = self.getFootprint().split(":")
if name.lower() == ColumnList.COL_REFERENCE.lower():
return self.getRef().strip()
elif name.lower() == ColumnList.COL_DESCRIPTION.lower():
return self.getDescription().strip()
elif name.lower() == ColumnList.COL_DATASHEET.lower():
return self.getDatasheet().strip()
# Footprint library is first element
elif name.lower() == ColumnList.COL_FP_LIB.lower():
if len(fp) > 1:
return fp[0].strip()
else:
# Explicit empty return
return ""
elif name.lower() == ColumnList.COL_FP.lower():
if len(fp) > 1:
return fp[1].strip()
elif len(fp) == 1:
return fp[0]
else:
return ""
elif name.lower() == ColumnList.COL_VALUE.lower():
return self.getValue().strip()
elif name.lower() == ColumnList.COL_PART.lower():
return self.getPartName().strip()
elif name.lower() == ColumnList.COL_PART_LIB.lower():
return self.getLibName().strip()
# Other fields (case insensitive)
for f in self.getFieldNames():
if f.lower() == name.lower():
field = self.element.get("field", "name", f)
if field == "" and libraryToo:
field = self.libpart.getField(f)
return field.strip()
# Could not find a matching field
return ""
def getFieldNames(self):
"""Return a list of field names in play for this component. Mandatory
fields are not included, and they are: Value, Footprint, Datasheet, Ref.
The netlist format only includes fields with non-empty values. So if a field
is empty, it will not be present in the returned list.
"""
fieldNames = []
fields = self.element.getChild('fields')
if fields:
for f in fields.getChildren():
fieldNames.append(f.get('field', 'name'))
return fieldNames
def getRef(self):
return self.element.get("comp", "ref")
# Determine if a component is FITTED or not
def isFitted(self):
check = self.getField(self.prefs.configField).lower()
# Check the value field first
if self.getValue().lower() in DNF:
return False
# Empty value means part is fitted
if check == "":
return True
opts = check.lower().split(",")
exclude = False
include = True
for opt in opts:
opt = opt.strip()
# Any option containing a DNF is not fitted
if opt in DNF:
exclude = True
break
# Options that start with '-' are explicitly removed from certain configurations
if opt.startswith("-") and str(opt[1:]) in [str(cfg) for cfg in self.prefs.pcbConfig]:
exclude = True
break
if opt.startswith("+"):
include = include or opt[1:] in [str(cfg) for cfg in self.prefs.pcbConfig]
return include and not exclude
# Test if this part should be included, based on any regex expressions provided in the preferences
def testRegExclude(self):
for reg in self.prefs.regExcludes:
if type(reg) == list and len(reg) == 2:
field_name, regex = reg
field_value = self.getField(field_name)
# Attempt unicode escaping...
# Filthy hack
try:
regex = regex.decode("unicode_escape")
except:
pass
if re.search(regex, field_value, flags=re.IGNORECASE) is not None:
if self.prefs.verbose:
print("Excluding '{ref}': Field '{field}' ({value}) matched '{reg}'".format(
ref=self.getRef(),
field=field_name,
value=field_value,
reg=regex).encode('utf-8'))
# Found a match
return True
# Default, could not find any matches
return False
def testRegInclude(self):
if len(self.prefs.regIncludes) == 0: # Nothing to match against
return True
for reg in self.prefs.regIncludes:
if type(reg) == list and len(reg) == 2:
field_name, regex = reg
field_value = self.getField(field_name)
print(field_name, field_value, regex)
if re.search(regex, field_value, flags=re.IGNORECASE) is not None:
if self.prefs.verbose:
print("")
# Found a match
return True
# Default, could not find a match
return False
def getFootprint(self, libraryToo=True):
ret = self.element.get("footprint")
if ret == "" and libraryToo:
if self.libpart:
ret = self.libpart.getFootprint()
return ret
def getDatasheet(self, libraryToo=True):
ret = self.element.get("datasheet")
if ret == "" and libraryToo:
ret = self.libpart.getDatasheet()
return ret
def getTimestamp(self):
return self.element.get("tstamp")
class joiner:
def __init__(self):
self.stack = []
def add(self, P, N):
if self.stack == []:
self.stack.append(((P, N), (P, N)))
return
S, E = self.stack[-1]
if N == E[1] + 1:
self.stack[-1] = (S, (P, N))
else:
self.stack.append(((P, N), (P, N)))
def flush(self, sep, N=None, dash='-'):
refstr = u''
c = 0
for Q in self.stack:
if bool(N) and c != 0 and c % N == 0:
refstr += u'\n'
elif c != 0:
refstr += sep
S, E = Q
if S == E:
refstr += "%s%d" % S
c += 1
else:
# Do we have space?
if bool(N) and (c + 1) % N == 0:
refstr += u'\n'
c += 1
refstr += "%s%d%s%s%d" % (S[0], S[1], dash, E[0], E[1])
c += 2
return refstr
class ComponentGroup():
"""
Initialize the group with no components, and default fields
"""
def __init__(self, prefs=None):
self.components = []
self.fields = dict.fromkeys(ColumnList._COLUMNS_DEFAULT) # Columns loaded from KiCad
if not prefs:
prefs = BomPref()
self.prefs = prefs
def getField(self, field):
if field not in self.fields.keys():
return ""
if not self.fields[field]:
return ""
return u''.join((self.fields[field]))
def getCount(self):
return len(self.components)
# Test if a given component fits in this group
def matchComponent(self, c):
if len(self.components) == 0:
return True
if c == self.components[0]:
return True
return False
def containsComponent(self, c):
# Test if a given component is already contained in this group
if not self.matchComponent(c):
return False
for comp in self.components:
if comp.getRef() == c.getRef():
return True
return False
def addComponent(self, c):
# Add a component to the group
if self.containsComponent(c):
return
self.components.append(c)
def isFitted(self):
return any([c.isFitted() for c in self.components])
def getRefs(self):
# Return a list of the components
return " ".join([c.getRef() for c in self.components])
def getAltRefs(self, wrapN=None):
S = joiner()
for n in self.components:
P, N = (n.getPrefix(), n.getSuffix())
S.add(P, N)
return S.flush(' ', N=wrapN)
# Sort the components in correct order
def sortComponents(self):
self.components = sorted(self.components, key=lambda c: natural_sort(c.getRef()))
# Update a given field, based on some rules and such
def updateField(self, field, fieldData):
# Protected fields cannot be overwritten
if field in ColumnList._COLUMNS_PROTECTED:
return
if field is None or field == "":
return
elif fieldData == "" or fieldData is None:
return
if (field not in self.fields.keys()) or (self.fields[field] is None) or (self.fields[field] == ""):
self.fields[field] = fieldData
elif fieldData.lower() in self.fields[field].lower():
return
else:
print("Field conflict: ({refs}) [{name}] : '{flds}' <- '{fld}'".format(
refs=self.getRefs(),
name=field,
flds=self.fields[field],
fld=fieldData).encode('utf-8'))
self.fields[field] += " " + fieldData
def updateFields(self, usealt=False, wrapN=None):
for c in self.components:
for f in c.getFieldNames():
# These columns are handled explicitly below
if f in ColumnList._COLUMNS_PROTECTED:
continue
self.updateField(f, c.getField(f))
# Update 'global' fields
if usealt:
self.fields[ColumnList.COL_REFERENCE] = self.getAltRefs(wrapN)
else:
self.fields[ColumnList.COL_REFERENCE] = self.getRefs()
q = self.getCount()
self.fields[ColumnList.COL_GRP_QUANTITY] = "{n}{dnf}".format(
n=q,
dnf=" (DNF)" if not self.isFitted() else "")
self.fields[ColumnList.COL_GRP_BUILD_QUANTITY] = str(q * self.prefs.boards) if self.isFitted() else "0"
if self.prefs.agregateValues:
self.fields[ColumnList.COL_VALUE] = ','.join(sorted(set([c.getValue() for c in self.components])))
else:
self.fields[ColumnList.COL_VALUE] = self.components[0].getValue()
self.fields[ColumnList.COL_PART] = self.components[0].getPartName()
self.fields[ColumnList.COL_PART_LIB] = self.components[0].getLibName()
self.fields[ColumnList.COL_DESCRIPTION] = self.components[0].getDescription()
self.fields[ColumnList.COL_DATASHEET] = self.components[0].getDatasheet()
# Footprint field requires special attention
fp = self.components[0].getFootprint().split(":")
if len(fp) >= 2:
self.fields[ColumnList.COL_FP_LIB] = fp[0]
self.fields[ColumnList.COL_FP] = fp[1]
elif len(fp) == 1:
self.fields[ColumnList.COL_FP_LIB] = ""
self.fields[ColumnList.COL_FP] = fp[0]
else:
self.fields[ColumnList.COL_FP_LIB] = ""
self.fields[ColumnList.COL_FP] = ""
# Return a dict of the KiCad data based on the supplied columns
# NOW WITH UNICODE SUPPORT!
def getRow(self, columns):
row = []
for key in columns:
val = self.getField(key)
if val is None:
val = ""
else:
val = u'' + val
if sys.version_info[0] < 3:
val = val.encode('utf-8')
row.append(val)
return row
| [
"# -*- coding: utf-8 -*-\n",
"from __future__ import unicode_literals\n",
"\n",
"from bomlib.columns import ColumnList\n",
"from bomlib.preferences import BomPref\n",
"import bomlib.units as units\n",
"from bomlib.sort import natural_sort\n",
"import re\n",
"import sys\n",
"\n",
"DNF = [\n",
" \"dnf\",\n",
" \"dnl\",\n",
" \"dnp\",\n",
" \"do not fit\",\n",
" \"do not place\",\n",
" \"do not load\",\n",
" \"nofit\",\n",
" \"nostuff\",\n",
" \"noplace\",\n",
" \"noload\",\n",
" \"not fitted\",\n",
" \"not loaded\",\n",
" \"not placed\",\n",
" \"no stuff\",\n",
"]\n",
"\n",
"\n",
"class Component():\n",
" \"\"\"Class for a component, aka 'comp' in the xml netlist file.\n",
" This component class is implemented by wrapping an xmlElement instance\n",
" with accessors. The xmlElement is held in field 'element'.\n",
" \"\"\"\n",
"\n",
" def __init__(self, xml_element, prefs=None):\n",
" self.element = xml_element\n",
" self.libpart = None\n",
"\n",
" if not prefs:\n",
" prefs = BomPref()\n",
"\n",
" self.prefs = prefs\n",
"\n",
" # Set to true when this component is included in a component group\n",
" self.grouped = False\n",
"\n",
" # Compare the value of this part, to the value of another part (see if they match)\n",
" def compareValue(self, other):\n",
" # Simple string comparison\n",
" if self.getValue().lower() == other.getValue().lower():\n",
" return True\n",
"\n",
" # Otherwise, perform a more complicated value comparison\n",
" if units.compareValues(self.getValue(), other.getValue()):\n",
" return True\n",
"\n",
" # Ignore value if both components are connectors\n",
" if self.prefs.groupConnectors:\n",
" if 'connector' in self.getLibName().lower() and 'connector' in other.getLibName().lower():\n",
" return True\n",
"\n",
" # No match, return False\n",
" return False\n",
"\n",
" # Determine if two parts have the same name\n",
" def comparePartName(self, other):\n",
" pn1 = self.getPartName().lower()\n",
" pn2 = other.getPartName().lower()\n",
"\n",
" # Simple direct match\n",
" if pn1 == pn2:\n",
" return True\n",
"\n",
" # Compare part aliases e.g. \"c\" to \"c_small\"\n",
" for alias in self.prefs.aliases:\n",
" if pn1 in alias and pn2 in alias:\n",
" return True\n",
"\n",
" return False\n",
"\n",
" def compareField(self, other, field):\n",
"\n",
" this_field = self.getField(field).lower()\n",
" other_field = other.getField(field).lower()\n",
"\n",
" # If blank comparisons are allowed\n",
" if this_field == \"\" or other_field == \"\":\n",
" if not self.prefs.mergeBlankFields:\n",
" return False\n",
"\n",
" if this_field == other_field:\n",
" return True\n",
"\n",
" return False\n",
"\n",
" def __eq__(self, other):\n",
" \"\"\"\n",
" Equivalency operator is used to determine if two parts are 'equal'\n",
" \"\"\"\n",
" \n",
" # 'fitted' value must be the same for both parts\n",
" if self.isFitted() != other.isFitted():\n",
" return False\n",
"\n",
" if len(self.prefs.groups) == 0:\n",
" return False\n",
"\n",
" for c in self.prefs.groups:\n",
" # Perform special matches\n",
" if c.lower() == ColumnList.COL_VALUE.lower():\n",
" if not self.compareValue(other):\n",
" return False\n",
" # Match part name\n",
" elif c.lower() == ColumnList.COL_PART.lower():\n",
" if not self.comparePartName(other):\n",
" return False\n",
"\n",
" # Generic match\n",
" elif not self.compareField(other, c):\n",
" return False\n",
"\n",
" return True\n",
"\n",
" def setLibPart(self, part):\n",
" self.libpart = part\n",
"\n",
" def getPrefix(self):\n",
" \"\"\"\n",
" Get the reference prefix\n",
" e.g. if this component has a reference U12, will return \"U\"\n",
" \"\"\"\n",
" \n",
" prefix = \"\"\n",
"\n",
" for c in self.getRef():\n",
" if c.isalpha():\n",
" prefix += c\n",
" else:\n",
" break\n",
"\n",
" return prefix\n",
"\n",
" def getSuffix(self):\n",
" \"\"\"\n",
" Return the reference suffix #\n",
" e.g. if this component has a reference U12, will return \"12\"\n",
" \"\"\"\n",
" \n",
" suffix = \"\"\n",
"\n",
" for c in self.getRef():\n",
" if c.isalpha():\n",
" suffix = \"\"\n",
" else:\n",
" suffix += c\n",
"\n",
" return int(suffix)\n",
"\n",
" def getLibPart(self):\n",
" return self.libpart\n",
"\n",
" def getPartName(self):\n",
" return self.element.get(\"libsource\", \"part\")\n",
"\n",
" def getLibName(self):\n",
" return self.element.get(\"libsource\", \"lib\")\n",
"\n",
" def getDescription(self):\n",
" try:\n",
" return self.element.get(\"libsource\", \"description\")\n",
" except:\n",
" # Compatibility with old KiCad versions (4.x)\n",
" ret = self.element.get(\"field\", \"name\", \"description\")\n",
"\n",
" if ret == \"\":\n",
" ret = self.libpart.getDescription()\n",
"\n",
" return ret\n",
"\n",
" def setValue(self, value):\n",
" \"\"\"Set the value of this component\"\"\"\n",
" v = self.element.getChild(\"value\")\n",
" if v:\n",
" v.setChars(value)\n",
"\n",
" def getValue(self):\n",
" return self.element.get(\"value\")\n",
"\n",
" def getField(self, name, ignoreCase=True, libraryToo=True):\n",
" \"\"\"Return the value of a field named name. The component is first\n",
" checked for the field, and then the components library part is checked\n",
" for the field. If the field doesn't exist in either, an empty string is\n",
" returned\n",
"\n",
" Keywords:\n",
" name -- The name of the field to return the value for\n",
" libraryToo -- look in the libpart's fields for the same name if not found\n",
" in component itself\n",
" \"\"\"\n",
"\n",
" fp = self.getFootprint().split(\":\")\n",
"\n",
" if name.lower() == ColumnList.COL_REFERENCE.lower():\n",
" return self.getRef().strip()\n",
"\n",
" elif name.lower() == ColumnList.COL_DESCRIPTION.lower():\n",
" return self.getDescription().strip()\n",
"\n",
" elif name.lower() == ColumnList.COL_DATASHEET.lower():\n",
" return self.getDatasheet().strip()\n",
"\n",
" # Footprint library is first element\n",
" elif name.lower() == ColumnList.COL_FP_LIB.lower():\n",
" if len(fp) > 1:\n",
" return fp[0].strip()\n",
" else:\n",
" # Explicit empty return\n",
" return \"\"\n",
"\n",
" elif name.lower() == ColumnList.COL_FP.lower():\n",
" if len(fp) > 1:\n",
" return fp[1].strip()\n",
" elif len(fp) == 1:\n",
" return fp[0]\n",
" else:\n",
" return \"\"\n",
"\n",
" elif name.lower() == ColumnList.COL_VALUE.lower():\n",
" return self.getValue().strip()\n",
"\n",
" elif name.lower() == ColumnList.COL_PART.lower():\n",
" return self.getPartName().strip()\n",
"\n",
" elif name.lower() == ColumnList.COL_PART_LIB.lower():\n",
" return self.getLibName().strip()\n",
"\n",
" # Other fields (case insensitive)\n",
" for f in self.getFieldNames():\n",
" if f.lower() == name.lower():\n",
" field = self.element.get(\"field\", \"name\", f)\n",
"\n",
" if field == \"\" and libraryToo:\n",
" field = self.libpart.getField(f)\n",
"\n",
" return field.strip()\n",
"\n",
" # Could not find a matching field\n",
" return \"\"\n",
"\n",
" def getFieldNames(self):\n",
" \"\"\"Return a list of field names in play for this component. Mandatory\n",
" fields are not included, and they are: Value, Footprint, Datasheet, Ref.\n",
" The netlist format only includes fields with non-empty values. So if a field\n",
" is empty, it will not be present in the returned list.\n",
" \"\"\"\n",
"\n",
" fieldNames = []\n",
" \n",
" fields = self.element.getChild('fields')\n",
" \n",
" if fields:\n",
" for f in fields.getChildren():\n",
" fieldNames.append(f.get('field', 'name'))\n",
" \n",
" return fieldNames\n",
"\n",
" def getRef(self):\n",
" return self.element.get(\"comp\", \"ref\")\n",
"\n",
" # Determine if a component is FITTED or not\n",
" def isFitted(self):\n",
"\n",
" check = self.getField(self.prefs.configField).lower()\n",
"\n",
" # Check the value field first\n",
" if self.getValue().lower() in DNF:\n",
" return False\n",
"\n",
" # Empty value means part is fitted\n",
" if check == \"\":\n",
" return True\n",
"\n",
" opts = check.lower().split(\",\")\n",
"\n",
" exclude = False\n",
" include = True\n",
"\n",
" for opt in opts:\n",
" opt = opt.strip()\n",
" # Any option containing a DNF is not fitted\n",
" if opt in DNF:\n",
" exclude = True\n",
" break\n",
" \n",
" # Options that start with '-' are explicitly removed from certain configurations\n",
" if opt.startswith(\"-\") and str(opt[1:]) in [str(cfg) for cfg in self.prefs.pcbConfig]:\n",
" exclude = True\n",
" break\n",
" if opt.startswith(\"+\"):\n",
" include = include or opt[1:] in [str(cfg) for cfg in self.prefs.pcbConfig]\n",
"\n",
" return include and not exclude\n",
"\n",
" # Test if this part should be included, based on any regex expressions provided in the preferences\n",
" def testRegExclude(self):\n",
"\n",
" for reg in self.prefs.regExcludes:\n",
"\n",
" if type(reg) == list and len(reg) == 2:\n",
" field_name, regex = reg\n",
" field_value = self.getField(field_name)\n",
"\n",
" # Attempt unicode escaping...\n",
" # Filthy hack\n",
" try:\n",
" regex = regex.decode(\"unicode_escape\")\n",
" except:\n",
" pass\n",
"\n",
" if re.search(regex, field_value, flags=re.IGNORECASE) is not None:\n",
" if self.prefs.verbose:\n",
" print(\"Excluding '{ref}': Field '{field}' ({value}) matched '{reg}'\".format(\n",
" ref=self.getRef(),\n",
" field=field_name,\n",
" value=field_value,\n",
" reg=regex).encode('utf-8'))\n",
"\n",
" # Found a match\n",
" return True\n",
"\n",
" # Default, could not find any matches\n",
" return False\n",
"\n",
" def testRegInclude(self):\n",
"\n",
" if len(self.prefs.regIncludes) == 0: # Nothing to match against\n",
" return True\n",
"\n",
" for reg in self.prefs.regIncludes:\n",
"\n",
" if type(reg) == list and len(reg) == 2:\n",
" field_name, regex = reg\n",
" field_value = self.getField(field_name)\n",
"\n",
" print(field_name, field_value, regex)\n",
"\n",
" if re.search(regex, field_value, flags=re.IGNORECASE) is not None:\n",
" if self.prefs.verbose:\n",
" print(\"\")\n",
"\n",
" # Found a match\n",
" return True\n",
"\n",
" # Default, could not find a match\n",
" return False\n",
"\n",
" def getFootprint(self, libraryToo=True):\n",
" ret = self.element.get(\"footprint\")\n",
" if ret == \"\" and libraryToo:\n",
" if self.libpart:\n",
" ret = self.libpart.getFootprint()\n",
" return ret\n",
"\n",
" def getDatasheet(self, libraryToo=True):\n",
" ret = self.element.get(\"datasheet\")\n",
" if ret == \"\" and libraryToo:\n",
" ret = self.libpart.getDatasheet()\n",
" return ret\n",
"\n",
" def getTimestamp(self):\n",
" return self.element.get(\"tstamp\")\n",
"\n",
"\n",
"class joiner:\n",
" def __init__(self):\n",
" self.stack = []\n",
"\n",
" def add(self, P, N):\n",
"\n",
" if self.stack == []:\n",
" self.stack.append(((P, N), (P, N)))\n",
" return\n",
"\n",
" S, E = self.stack[-1]\n",
"\n",
" if N == E[1] + 1:\n",
" self.stack[-1] = (S, (P, N))\n",
" else:\n",
" self.stack.append(((P, N), (P, N)))\n",
"\n",
" def flush(self, sep, N=None, dash='-'):\n",
" \n",
" refstr = u''\n",
" c = 0\n",
"\n",
" for Q in self.stack:\n",
" if bool(N) and c != 0 and c % N == 0:\n",
" refstr += u'\\n'\n",
" elif c != 0:\n",
" refstr += sep\n",
"\n",
" S, E = Q\n",
"\n",
" if S == E:\n",
" refstr += \"%s%d\" % S\n",
" c += 1\n",
" else:\n",
" # Do we have space?\n",
" if bool(N) and (c + 1) % N == 0:\n",
" refstr += u'\\n'\n",
" c += 1\n",
"\n",
" refstr += \"%s%d%s%s%d\" % (S[0], S[1], dash, E[0], E[1])\n",
" c += 2\n",
" return refstr\n",
"\n",
"\n",
"class ComponentGroup():\n",
"\n",
" \"\"\"\n",
" Initialize the group with no components, and default fields\n",
" \"\"\"\n",
" def __init__(self, prefs=None):\n",
" self.components = []\n",
" self.fields = dict.fromkeys(ColumnList._COLUMNS_DEFAULT) # Columns loaded from KiCad\n",
"\n",
" if not prefs:\n",
" prefs = BomPref()\n",
"\n",
" self.prefs = prefs\n",
"\n",
" def getField(self, field):\n",
"\n",
" if field not in self.fields.keys():\n",
" return \"\"\n",
" \n",
" if not self.fields[field]:\n",
" return \"\"\n",
" \n",
" return u''.join((self.fields[field]))\n",
"\n",
" def getCount(self):\n",
" return len(self.components)\n",
"\n",
" # Test if a given component fits in this group\n",
" def matchComponent(self, c):\n",
" if len(self.components) == 0:\n",
" return True\n",
" if c == self.components[0]:\n",
" return True\n",
"\n",
" return False\n",
"\n",
" def containsComponent(self, c):\n",
" # Test if a given component is already contained in this grop\n",
" if not self.matchComponent(c):\n",
" return False\n",
"\n",
" for comp in self.components:\n",
" if comp.getRef() == c.getRef():\n",
" return True\n",
"\n",
" return False\n",
"\n",
" def addComponent(self, c):\n",
" # Add a component to the group\n",
" if self.containsComponent(c):\n",
" return\n",
"\n",
" self.components.append(c)\n",
"\n",
" def isFitted(self):\n",
" return any([c.isFitted() for c in self.components])\n",
"\n",
" def getRefs(self):\n",
" # Return a list of the components\n",
" return \" \".join([c.getRef() for c in self.components])\n",
"\n",
" def getAltRefs(self, wrapN=None):\n",
" S = joiner()\n",
"\n",
" for n in self.components:\n",
" P, N = (n.getPrefix(), n.getSuffix())\n",
" S.add(P, N)\n",
"\n",
" return S.flush(' ', N=wrapN)\n",
"\n",
" # Sort the components in correct order\n",
" def sortComponents(self):\n",
" self.components = sorted(self.components, key=lambda c: natural_sort(c.getRef()))\n",
"\n",
" # Update a given field, based on some rules and such\n",
" def updateField(self, field, fieldData):\n",
"\n",
" # Protected fields cannot be overwritten\n",
" if field in ColumnList._COLUMNS_PROTECTED:\n",
" return\n",
"\n",
" if field is None or field == \"\":\n",
" return\n",
" elif fieldData == \"\" or fieldData is None:\n",
" return\n",
"\n",
" if (field not in self.fields.keys()) or (self.fields[field] is None) or (self.fields[field] == \"\"):\n",
" self.fields[field] = fieldData\n",
" elif fieldData.lower() in self.fields[field].lower():\n",
" return\n",
" else:\n",
" print(\"Field conflict: ({refs}) [{name}] : '{flds}' <- '{fld}'\".format(\n",
" refs=self.getRefs(),\n",
" name=field,\n",
" flds=self.fields[field],\n",
" fld=fieldData).encode('utf-8'))\n",
" self.fields[field] += \" \" + fieldData\n",
"\n",
" def updateFields(self, usealt=False, wrapN=None):\n",
" for c in self.components:\n",
" for f in c.getFieldNames():\n",
"\n",
" # These columns are handled explicitly below\n",
" if f in ColumnList._COLUMNS_PROTECTED:\n",
" continue\n",
"\n",
" self.updateField(f, c.getField(f))\n",
"\n",
" # Update 'global' fields\n",
" if usealt:\n",
" self.fields[ColumnList.COL_REFERENCE] = self.getAltRefs(wrapN)\n",
" else:\n",
" self.fields[ColumnList.COL_REFERENCE] = self.getRefs()\n",
"\n",
" q = self.getCount()\n",
" self.fields[ColumnList.COL_GRP_QUANTITY] = \"{n}{dnf}\".format(\n",
" n=q,\n",
" dnf=\" (DNF)\" if not self.isFitted() else \"\")\n",
"\n",
" self.fields[ColumnList.COL_GRP_BUILD_QUANTITY] = str(q * self.prefs.boards) if self.isFitted() else \"0\"\n",
" if self.prefs.agregateValues:\n",
" self.fields[ColumnList.COL_VALUE] = ','.join(sorted(set([c.getValue() for c in self.components])))\n",
" else:\n",
" self.fields[ColumnList.COL_VALUE] = self.components[0].getValue()\n",
"\n",
" self.fields[ColumnList.COL_PART] = self.components[0].getPartName()\n",
" self.fields[ColumnList.COL_PART_LIB] = self.components[0].getLibName()\n",
" self.fields[ColumnList.COL_DESCRIPTION] = self.components[0].getDescription()\n",
" self.fields[ColumnList.COL_DATASHEET] = self.components[0].getDatasheet()\n",
"\n",
" # Footprint field requires special attention\n",
" fp = self.components[0].getFootprint().split(\":\")\n",
"\n",
" if len(fp) >= 2:\n",
" self.fields[ColumnList.COL_FP_LIB] = fp[0]\n",
" self.fields[ColumnList.COL_FP] = fp[1]\n",
" elif len(fp) == 1:\n",
" self.fields[ColumnList.COL_FP_LIB] = \"\"\n",
" self.fields[ColumnList.COL_FP] = fp[0]\n",
" else:\n",
" self.fields[ColumnList.COL_FP_LIB] = \"\"\n",
" self.fields[ColumnList.COL_FP] = \"\"\n",
"\n",
" # Return a dict of the KiCad data based on the supplied columns\n",
" # NOW WITH UNICODE SUPPORT!\n",
" def getRow(self, columns):\n",
" row = []\n",
" for key in columns:\n",
" val = self.getField(key)\n",
"\n",
" if val is None:\n",
" val = \"\"\n",
" else:\n",
" val = u'' + val\n",
" if sys.version_info[0] < 3:\n",
" val = val.encode('utf-8')\n",
"\n",
" row.append(val)\n",
"\n",
" return row\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.011627906976744186,
0,
0,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0.010752688172043012,
0.010101010101010102,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0.012048192771084338,
0,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008928571428571428,
0,
0.009009009009009009,
0,
0,
0,
0,
0,
0.011627906976744186,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 577 | 0.002476 |
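The two trailing values on the row-closing lines (for example 577 and 0.002476 for the block just closed) line up with the per-line arrays: the first matches the length of the texts/scores lists and the second matches the mean of the scores. Below is a minimal sketch of that assumed relationship; the helper name and the claim that the average is a plain mean are inferred from the visible numbers, not stated anywhere in this dump.

def summarize_row(texts, scores):
    # Assumed derivation: num_lines = len(scores), avg_score = mean(scores).
    # texts and scores are expected to be parallel, one entry per source line.
    assert len(texts) == len(scores)
    num_lines = len(scores)
    avg_score = sum(scores) / num_lines if num_lines else 0.0
    return num_lines, avg_score

# Toy usage; applied to the row above this should reproduce 577 and roughly 0.002476.
print(summarize_row(["a\n", "b\n", "c\n"], [0.0, 0.1111111111111111, 0.0]))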
"""Tests 1D angular weights"""
import numpy as np
import odtbrain
from common_methods import create_test_sino_2d, get_test_parameter_set
def test_angle_offset():
"""
Tests if things are still correct when there is a 2PI offset in the angles.
"""
sino, angles = create_test_sino_2d()
parameters = get_test_parameter_set(2)
# reference
r1 = []
for p in parameters:
f1 = odtbrain.backpropagate_2d(sino, angles, weight_angles=False, **p)
r1.append(f1)
# with offset
angles[::2] += 2*np.pi*np.arange(angles[::2].shape[0])
r2 = []
for p in parameters:
f2 = odtbrain.backpropagate_2d(sino, angles, weight_angles=False, **p)
r2.append(f2)
# with offset and weights
r3 = []
for p in parameters:
f3 = odtbrain.backpropagate_2d(sino, angles, weight_angles=True, **p)
r3.append(f3)
assert np.allclose(np.array(r1).flatten().view(float),
np.array(r2).flatten().view(float))
assert np.allclose(np.array(r2).flatten().view(float),
np.array(r3).flatten().view(float))
def test_angle_swap():
"""
Test if everything still works, when angles are swapped.
"""
sino, angles = create_test_sino_2d()
# remove elements so that we can see that weighting works
angles = angles[:-2]
sino = sino[:-2, :]
parameters = get_test_parameter_set(2)
# reference
r1 = []
for p in parameters:
f1 = odtbrain.backpropagate_2d(sino, angles, weight_angles=True, **p)
r1.append(f1)
# change order of angles
order = np.argsort(angles % .5)
angles = angles[order]
sino = sino[order, :]
r2 = []
for p in parameters:
f2 = odtbrain.backpropagate_2d(sino, angles, weight_angles=True, **p)
r2.append(f2)
assert np.allclose(np.array(r1).flatten().view(float),
np.array(r2).flatten().view(float))
if __name__ == "__main__":
# Run all tests
loc = locals()
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
| [
"\"\"\"Tests 1D angular weights\"\"\"\n",
"import numpy as np\n",
"\n",
"import odtbrain\n",
"\n",
"from common_methods import create_test_sino_2d, get_test_parameter_set\n",
"\n",
"\n",
"def test_angle_offset():\n",
" \"\"\"\n",
" Tests if things are still correct when there is a 2PI offset in the angles.\n",
" \"\"\"\n",
" sino, angles = create_test_sino_2d()\n",
" parameters = get_test_parameter_set(2)\n",
" # reference\n",
" r1 = []\n",
" for p in parameters:\n",
" f1 = odtbrain.backpropagate_2d(sino, angles, weight_angles=False, **p)\n",
" r1.append(f1)\n",
" # with offset\n",
" angles[::2] += 2*np.pi*np.arange(angles[::2].shape[0])\n",
" r2 = []\n",
" for p in parameters:\n",
" f2 = odtbrain.backpropagate_2d(sino, angles, weight_angles=False, **p)\n",
" r2.append(f2)\n",
" # with offset and weights\n",
" r3 = []\n",
" for p in parameters:\n",
" f3 = odtbrain.backpropagate_2d(sino, angles, weight_angles=True, **p)\n",
" r3.append(f3)\n",
" assert np.allclose(np.array(r1).flatten().view(float),\n",
" np.array(r2).flatten().view(float))\n",
" assert np.allclose(np.array(r2).flatten().view(float),\n",
" np.array(r3).flatten().view(float))\n",
"\n",
"\n",
"def test_angle_swap():\n",
" \"\"\"\n",
" Test if everything still works, when angles are swapped.\n",
" \"\"\"\n",
" sino, angles = create_test_sino_2d()\n",
" # remove elements so that we can see that weighting works\n",
" angles = angles[:-2]\n",
" sino = sino[:-2, :]\n",
" parameters = get_test_parameter_set(2)\n",
" # reference\n",
" r1 = []\n",
" for p in parameters:\n",
" f1 = odtbrain.backpropagate_2d(sino, angles, weight_angles=True, **p)\n",
" r1.append(f1)\n",
" # change order of angles\n",
" order = np.argsort(angles % .5)\n",
" angles = angles[order]\n",
" sino = sino[order, :]\n",
" r2 = []\n",
" for p in parameters:\n",
" f2 = odtbrain.backpropagate_2d(sino, angles, weight_angles=True, **p)\n",
" r2.append(f2)\n",
" assert np.allclose(np.array(r1).flatten().view(float),\n",
" np.array(r2).flatten().view(float))\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" # Run all tests\n",
" loc = locals()\n",
" for key in list(loc.keys()):\n",
" if key.startswith(\"test_\") and hasattr(loc[key], \"__call__\"):\n",
" loc[key]()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 68 | 0 |
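test_angle_offset above leans on the fact that adding whole multiples of 2*pi to an angle leaves its direction unchanged, so the backpropagation result should not change either. Here is a small standalone NumPy check of that periodicity, independent of odtbrain, showing only the trigonometric identity the test relies on.

import numpy as np

angles = np.linspace(0, 2 * np.pi, 10, endpoint=False)
shifted = angles.copy()
# Same offset pattern as in the test: every second angle gains k * 2*pi.
shifted[::2] += 2 * np.pi * np.arange(shifted[::2].shape[0])

# Directions are unchanged, so cosines and sines agree up to float error.
assert np.allclose(np.cos(angles), np.cos(shifted))
assert np.allclose(np.sin(angles), np.sin(shifted))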
from __future__ import absolute_import
import re
from . import Processor
import makerbot_driver
class DualRetractProcessor(Processor):
def __init__(self):
super(DualRetractProcessor, self).__init__()
self.layer_start = re.compile("^(;\s?Slice|\(<layer>) [0-9.]+.*", re.I)
self.snort = re.compile(
"^G1.*[AB]([0-9.-]+).*;? (?:Retract|End of print)|^G1 F[0-9.-]+\nG1 E([0-9.-]+)", re.I)
self.squirt = re.compile(
"^G1.*[AB]([0-9.-]+).*;? Restart|^G1 F[0-9.-]+\nG1 E([0-9.-]+)", re.I)
self.toolchange = re.compile("^M135 T([0-9])")
self.SF_feedrate = re.compile("^G1 F[0-9.-]+\n")
self.prime = re.compile(".*prime.*|.*Prime.*")
self.TOOLHEADS = ['A', 'B']
def isGenerator(self,iterable):
"""
        Function decides if the input iterable is a generator
@param iterable: iterable object
@return boolean: True if it is a generator
"""
return hasattr(iterable, '__iter__') and not hasattr(iterable, '__len__')
def sandwich_iter(self, iterable):
"""
        This function returns an iterator with the previous, current, and next values
in a given iterable
@param iterable: iterable object
@return iterator of triplets
"""
if(self.isGenerator(iterable)):
iterator = iterable
else:
iterator = iter(iterable)
current = iterator.next()
prev = None
for next in iterator:
yield(prev,current,next)
prev = current
current = next
yield(prev,current,'')
def process_gcode(self, gcode_in, gcode_info=None):
"""
This function adds retractions and squirt tweaks to a gcode input
@param gcode_in: iterable object containing gcode
"""
self.retract_distance_mm = self.profile.values["dualstrusion"][
"retract_distance_mm"]
self.squirt_reduction_mm = self.profile.values["dualstrusion"][
"squirt_reduce_mm"]
self.squirt_feedrate = self.profile.values["dualstrusion"][
"squirt_feedrate"]
self.snort_feedrate = self.profile.values["dualstrusion"][
"snort_feedrate"]
if(self.retract_distance_mm == 0 or (not self.profile_supports_processor())):
#If self.retract_distance_mm is NULL or 0 then don't run the processor on
#the gcode
for code in gcode_in:
yield code
raise StopIteration
self.current_tool = -1
self.last_tool = -1
self.last_snort = {'index': None, 'tool': None, 'extruder_position':None}
self.squirt_extruder_pos = None
self.seeking_first_toolchange = True
self.seeking_first_layer = True
self.seeking_squirt = False
self.SF_flag = False
self.SF_handle_second_squirt_line = False
self.buffer = []
self.buffering = True
self.flush_buffer = False
for (previous_code,current_code,next_code) in self.sandwich_iter(gcode_in):
if(self.SF_handle_second_squirt_line):
self.SF_handle_second_squirt_line = False
continue
if(self.seeking_squirt):
#Check for more toolchanges whilst seeking the next squirt
self.check_for_significant_toolchange(current_code)
if(self.check_for_squirt(current_code+next_code)):
self.squirt_replace()
continue
elif(self.seeking_first_layer):
self.check_for_significant_toolchange(current_code)
if(self.check_for_layer(current_code)):
self.seeking_first_layer = False
else:
if(self.check_for_snort(current_code+next_code)):
self.flush_buffer = True
elif(self.check_for_significant_toolchange(current_code)):
if(self.seeking_first_toolchange):
match_prev = re.match(self.prime, previous_code)
match_next = re.match(self.prime, next_code)
if((match_prev is not None) or (match_next is not None)):
#If toolchanges are in the prime ignore
self.current_tool = self.last_tool
self.last_tool = -1
else:
#if this is the first significant toolchange do an extra squirt
self.seeking_first_toolchange = False
#little bit hacky to get first significant toolchange before output
#of squirt_tool()
self.buffer.append(current_code)
self.squirt_tool(self.current_tool, squirt_initial_inactive_tool=True)
#this is so duplicate current_codes aren't outputted
self.buffering = False
else:
self.seeking_squirt = True
self.snort_replace()
if(self.flush_buffer):
for line in self.buffer:
yield line
self.buffer = []
self.flush_buffer = False
if(self.buffering):
self.buffer.append(current_code)
else:
self.buffering = True
#Squirt retracted tool at the end of the print
self.squirt_tool(self.get_other_tool(self.current_tool))
for line in self.buffer:
yield line
def check_if_in_prime(self, previous_code, next_code):
"""
Checks if the current position is inside the prime block
that is inserted by a related processor
@param previous_code: string
@param next_code: string
@return: boolean: True if it is in the prime block
"""
match_prev = re.match(self.prime, previous_code)
match_next = re.match(self.prime, next_code)
if((match_prev is not None) or (match_next is not None)):
#If toolchanges are in the prime ignore
self.current_tool = self.last_tool
self.last_tool = -1
return True
else:
return False
def check_for_layer(self,string):
match = re.match(self.layer_start, string)
return match is not None
def check_for_snort(self,string):
"""
Check to see if input string is a snort
if so it saves the snort values and returns
@param string: string to be matched with the regex
@return boolean: True if it is a snort
"""
match = re.match(self.snort, string)
if match is not None:
extruder_position = match.group(1)
if(extruder_position == None):
extruder_position = match.group(2)
self.last_snort['index'] = 0
self.last_snort['tool'] = self.current_tool
self.last_snort['extruder_position'] = float(extruder_position)
#Check if this is a SF snort
match = re.match(self.SF_feedrate, string)
if match is not None:
self.SF_flag = True
return True
else:
return False
def check_for_significant_toolchange(self,string):
"""
Checks for significant toolchange(i.e. from tool 0 -> 1)
Updates the current tool accordingly
@param string: string to be matched to toolchange regex
@return boolean: True if a significant toolchange is found
"""
match = re.match(self.toolchange, string)
if match is not None:
if(self.current_tool == -1):
self.current_tool = int(match.group(1))
return False
elif(self.current_tool != int(match.group(1))):
self.last_tool = self.current_tool
self.current_tool = int(match.group(1))
return True
else:
return False
else:
return False
def check_for_squirt(self, string):
"""
Check if input string contains a squirt
@param string: string to be matched to squirt regex
@return boolean: True if squirt was found
"""
match = re.match(self.squirt, string)
if match is not None:
extruder_position = match.group(1)
if(extruder_position == None):
extruder_position = match.group(2)
self.squirt_extruder_pos = float(extruder_position)
match = re.match(self.SF_feedrate, string)
if match is not None:
self.SF_handle_second_squirt_line = True
self.seeking_squirt = False
return True
else:
return False
def get_other_tool(self, tool):
inactive_tool = {0:1, 1:0}
return inactive_tool.get(tool, -1)
def squirt_tool(self, tool, squirt_initial_inactive_tool=False):
"""
Inserts squirt command for given tool
@param tool: integer, tool to squirt
        @param squirt_initial_inactive_tool: boolean, if this is the squirt of the initial
significant toolchange
"""
if not squirt_initial_inactive_tool:
self.buffer.append("M135 T%i\n"%(tool))
self.buffer.append("G92 %s0\n"%(self.TOOLHEADS[tool]))
self.buffer.append("G1 F%f %s%f\n"%(self.squirt_feedrate, self.TOOLHEADS[tool],
self.retract_distance_mm))
self.buffer.append("G92 %s0\n"%(self.TOOLHEADS[tool]))
def squirt_replace(self):
new_extruder_position = self.squirt_extruder_pos-self.squirt_reduction_mm
squirt_line = "G1 F%f %s%f\n"%(self.squirt_feedrate,
self.TOOLHEADS[self.current_tool], new_extruder_position)
self.buffer.append(squirt_line)
#This G92 is to help reduce the blobbing that occurs on tool startup by reducing
#the amount of plastic put out on squirt
set_extruder_pos_line = "G92 %s%f\n"%(self.TOOLHEADS[self.current_tool],
self.squirt_extruder_pos)
self.buffer.append(set_extruder_pos_line)
def snort_replace(self):
"""
Replaces a past snort
"""
if(self.last_snort['index'] != None):
snort_index = self.last_snort['index']
snort_extruder_position = self.last_snort['extruder_position']
new_extruder_position = snort_extruder_position-self.retract_distance_mm
snort_line = "G1 F%f %s%f\n"%(self.snort_feedrate,
self.TOOLHEADS[self.last_tool], new_extruder_position)
self.buffer[snort_index] = snort_line
#if SF replace second line of the snort with a blank line
if(self.SF_flag):
self.buffer[snort_index+1] = '\n'
#Reset Last Snort
self.last_snort['index'] = None
self.last_snort['tool'] = None
self.last_snort['extruder_position'] = None
def profile_supports_processor(self):
if(self.retract_distance_mm == 'NULL'):
return False
else:
return True
| [
"from __future__ import absolute_import\n",
"\n",
"import re\n",
"\n",
"from . import Processor\n",
"import makerbot_driver\n",
"\n",
"\n",
"class DualRetractProcessor(Processor):\n",
" def __init__(self):\n",
" super(DualRetractProcessor, self).__init__()\n",
" self.layer_start = re.compile(\"^(;\\s?Slice|\\(<layer>) [0-9.]+.*\", re.I)\n",
" self.snort = re.compile(\n",
" \"^G1.*[AB]([0-9.-]+).*;? (?:Retract|End of print)|^G1 F[0-9.-]+\\nG1 E([0-9.-]+)\", re.I)\n",
" self.squirt = re.compile(\n",
" \"^G1.*[AB]([0-9.-]+).*;? Restart|^G1 F[0-9.-]+\\nG1 E([0-9.-]+)\", re.I)\n",
" self.toolchange = re.compile(\"^M135 T([0-9])\")\n",
" self.SF_feedrate = re.compile(\"^G1 F[0-9.-]+\\n\")\n",
" self.prime = re.compile(\".*prime.*|.*Prime.*\")\n",
"\n",
" self.TOOLHEADS = ['A', 'B']\n",
"\n",
"\n",
" def isGenerator(self,iterable):\n",
" \"\"\"\n",
" Fucntion decides if the input iterable is a generator\n",
"\n",
" @param iterable: iterable object\n",
" @return boolean: True if it is a generator\n",
" \"\"\"\n",
" return hasattr(iterable, '__iter__') and not hasattr(iterable, '__len__')\n",
"\n",
"\n",
" def sandwich_iter(self, iterable):\n",
" \"\"\"\n",
" This function returns an iterator with the previous,current,and next values\n",
" in a given iterable\n",
"\n",
" @param iterable: iterable object\n",
" @return iterator of triplets\n",
" \"\"\"\n",
" if(self.isGenerator(iterable)):\n",
" iterator = iterable\n",
" else:\n",
" iterator = iter(iterable)\n",
"\n",
" current = iterator.next()\n",
" prev = None\n",
"\n",
" for next in iterator:\n",
" yield(prev,current,next)\n",
" prev = current \n",
" current = next\n",
" yield(prev,current,'')\n",
"\n",
"\n",
" def process_gcode(self, gcode_in, gcode_info=None):\n",
" \"\"\"\n",
" This function adds retractions and squirt tweaks to a gcode input\n",
"\n",
" @param gcode_in: iterable object containing gcode\n",
" \"\"\"\n",
" self.retract_distance_mm = self.profile.values[\"dualstrusion\"][\n",
" \"retract_distance_mm\"]\n",
" self.squirt_reduction_mm = self.profile.values[\"dualstrusion\"][\n",
" \"squirt_reduce_mm\"]\n",
" self.squirt_feedrate = self.profile.values[\"dualstrusion\"][\n",
" \"squirt_feedrate\"]\n",
" self.snort_feedrate = self.profile.values[\"dualstrusion\"][\n",
" \"snort_feedrate\"]\n",
"\n",
"\n",
" if(self.retract_distance_mm == 0 or (not self.profile_supports_processor())):\n",
" #If self.retract_distance_mm is NULL or 0 then don't run the processor on\n",
" #the gcode\n",
" for code in gcode_in:\n",
" yield code\n",
" raise StopIteration\n",
"\n",
" self.current_tool = -1\n",
" self.last_tool = -1\n",
" self.last_snort = {'index': None, 'tool': None, 'extruder_position':None}\n",
" self.squirt_extruder_pos = None\n",
" self.seeking_first_toolchange = True\n",
" self.seeking_first_layer = True\n",
" self.seeking_squirt = False\n",
" self.SF_flag = False\n",
" self.SF_handle_second_squirt_line = False\n",
" self.buffer = []\n",
" self.buffering = True\n",
" self.flush_buffer = False\n",
"\n",
" for (previous_code,current_code,next_code) in self.sandwich_iter(gcode_in): \n",
" if(self.SF_handle_second_squirt_line):\n",
" self.SF_handle_second_squirt_line = False\n",
" continue\n",
"\n",
" if(self.seeking_squirt):\n",
" #Check for more toolchanges whilst seeking the next squirt\n",
" self.check_for_significant_toolchange(current_code)\n",
" if(self.check_for_squirt(current_code+next_code)):\n",
" self.squirt_replace()\n",
" continue\n",
" elif(self.seeking_first_layer):\n",
" self.check_for_significant_toolchange(current_code)\n",
" if(self.check_for_layer(current_code)):\n",
" self.seeking_first_layer = False\n",
" else:\n",
" if(self.check_for_snort(current_code+next_code)):\n",
" self.flush_buffer = True\n",
" elif(self.check_for_significant_toolchange(current_code)):\n",
" if(self.seeking_first_toolchange):\n",
" match_prev = re.match(self.prime, previous_code)\n",
" match_next = re.match(self.prime, next_code)\n",
" if((match_prev is not None) or (match_next is not None)):\n",
" #If toolchanges are in the prime ignore\n",
" self.current_tool = self.last_tool\n",
" self.last_tool = -1\n",
" else:\n",
" #if this is the first significant toolchange do an extra squirt\n",
" self.seeking_first_toolchange = False\n",
" #little bit hacky to get first significant toolchange before output\n",
" #of squirt_tool()\n",
" self.buffer.append(current_code)\n",
" self.squirt_tool(self.current_tool, squirt_initial_inactive_tool=True)\n",
" #this is so duplicate current_codes aren't outputted\n",
" self.buffering = False\n",
" else:\n",
" self.seeking_squirt = True\n",
" self.snort_replace()\n",
"\n",
" if(self.flush_buffer):\n",
" for line in self.buffer:\n",
" yield line\n",
" self.buffer = []\n",
" self.flush_buffer = False\n",
" if(self.buffering):\n",
" self.buffer.append(current_code)\n",
" else:\n",
" self.buffering = True\n",
"\n",
" #Squirt retracted tool at the end of the print\n",
" self.squirt_tool(self.get_other_tool(self.current_tool))\n",
"\n",
" for line in self.buffer:\n",
" yield line\n",
"\n",
"\n",
" def check_if_in_prime(self, previous_code, next_code):\n",
" \"\"\"\n",
" Checks if the current position is inside the prime block\n",
" that is inserted by a related processor\n",
"\n",
" @param previous_code: string\n",
" @param next_code: string\n",
" @return: boolean: True if it is in the prime block\n",
" \"\"\"\n",
" match_prev = re.match(self.prime, previous_code)\n",
" match_next = re.match(self.prime, next_code)\n",
" if((match_prev is not None) or (match_next is not None)):\n",
" #If toolchanges are in the prime ignore\n",
" self.current_tool = self.last_tool\n",
" self.last_tool = -1\n",
" return True\n",
" else:\n",
" return False\n",
"\n",
"\n",
" def check_for_layer(self,string):\n",
" match = re.match(self.layer_start, string)\n",
" return match is not None\n",
"\n",
"\n",
" def check_for_snort(self,string):\n",
" \"\"\"\n",
" Check to see if input string is a snort\n",
" if so it saves the snort values and returns\n",
" \n",
" @param string: string to be matched with the regex\n",
" @return boolean: True if it is a snort\n",
" \"\"\"\n",
" match = re.match(self.snort, string)\n",
" if match is not None:\n",
" extruder_position = match.group(1)\n",
" if(extruder_position == None):\n",
" extruder_position = match.group(2)\n",
" self.last_snort['index'] = 0\n",
" self.last_snort['tool'] = self.current_tool\n",
" self.last_snort['extruder_position'] = float(extruder_position)\n",
" #Check if this is a SF snort\n",
" match = re.match(self.SF_feedrate, string)\n",
" if match is not None:\n",
" self.SF_flag = True\n",
" return True\n",
" else:\n",
" return False\n",
"\n",
"\n",
" def check_for_significant_toolchange(self,string):\n",
" \"\"\"\n",
" Checks for significant toolchange(i.e. from tool 0 -> 1)\n",
" Updates the current tool accordingly\n",
"\n",
" @param string: string to be matched to toolchange regex\n",
" @return boolean: True if a significant toolchange is found\n",
" \"\"\"\n",
" match = re.match(self.toolchange, string)\n",
" if match is not None:\n",
" if(self.current_tool == -1):\n",
" self.current_tool = int(match.group(1))\n",
" return False\n",
" elif(self.current_tool != int(match.group(1))):\n",
" self.last_tool = self.current_tool\n",
" self.current_tool = int(match.group(1))\n",
" return True\n",
" else:\n",
" return False\n",
" else:\n",
" return False\n",
"\n",
"\n",
" def check_for_squirt(self, string):\n",
" \"\"\"\n",
" Check if input string contains a squirt\n",
"\n",
" @param string: string to be matched to squirt regex\n",
" @return boolean: True if squirt was found\n",
" \"\"\"\n",
" match = re.match(self.squirt, string)\n",
" if match is not None:\n",
" extruder_position = match.group(1)\n",
" if(extruder_position == None):\n",
" extruder_position = match.group(2)\n",
" self.squirt_extruder_pos = float(extruder_position)\n",
" match = re.match(self.SF_feedrate, string)\n",
" if match is not None:\n",
" self.SF_handle_second_squirt_line = True\n",
" self.seeking_squirt = False\n",
" return True\n",
" else:\n",
" return False\n",
"\n",
"\n",
" def get_other_tool(self, tool):\n",
" inactive_tool = {0:1, 1:0}\n",
" return inactive_tool.get(tool, -1)\n",
"\n",
"\n",
" def squirt_tool(self, tool, squirt_initial_inactive_tool=False):\n",
" \"\"\"\n",
" Inserts squirt command for given tool\n",
" @param tool: integer, tool to squirt\n",
" @param squirt_initial_inactve_tool: boolean, if this is the squirt of the initial\n",
" significant toolchange\n",
" \"\"\"\n",
" if not squirt_initial_inactive_tool:\n",
" self.buffer.append(\"M135 T%i\\n\"%(tool))\n",
" self.buffer.append(\"G92 %s0\\n\"%(self.TOOLHEADS[tool]))\n",
" self.buffer.append(\"G1 F%f %s%f\\n\"%(self.squirt_feedrate, self.TOOLHEADS[tool],\n",
" self.retract_distance_mm))\n",
" self.buffer.append(\"G92 %s0\\n\"%(self.TOOLHEADS[tool]))\n",
" \n",
"\n",
" def squirt_replace(self):\n",
" new_extruder_position = self.squirt_extruder_pos-self.squirt_reduction_mm\n",
"\n",
" squirt_line = \"G1 F%f %s%f\\n\"%(self.squirt_feedrate,\n",
" self.TOOLHEADS[self.current_tool], new_extruder_position)\n",
" self.buffer.append(squirt_line)\n",
" #This G92 is to help reduce the blobbing that occurs on tool startup by reducing\n",
" #the amount of plastic put out on squirt\n",
" set_extruder_pos_line = \"G92 %s%f\\n\"%(self.TOOLHEADS[self.current_tool],\n",
" self.squirt_extruder_pos)\n",
" self.buffer.append(set_extruder_pos_line)\n",
"\n",
"\n",
" def snort_replace(self):\n",
" \"\"\"\n",
" Replaces a past snort\n",
" \"\"\"\n",
" if(self.last_snort['index'] != None):\n",
" snort_index = self.last_snort['index']\n",
" snort_extruder_position = self.last_snort['extruder_position']\n",
" new_extruder_position = snort_extruder_position-self.retract_distance_mm\n",
"\n",
" snort_line = \"G1 F%f %s%f\\n\"%(self.snort_feedrate,\n",
" self.TOOLHEADS[self.last_tool], new_extruder_position)\n",
" self.buffer[snort_index] = snort_line\n",
" #if SF replace second line of the snort with a blank line\n",
" if(self.SF_flag):\n",
" self.buffer[snort_index+1] = '\\n'\n",
"\n",
" #Reset Last Snort\n",
" self.last_snort['index'] = None\n",
" self.last_snort['tool'] = None\n",
" self.last_snort['extruder_position'] = None\n",
"\n",
"\n",
" def profile_supports_processor(self):\n",
" if(self.retract_distance_mm == 'NULL'):\n",
" return False\n",
" else:\n",
" return True\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0.01,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0.02564102564102564,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05405405405405406,
0.02857142857142857,
0,
0.06451612903225806,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0.023255813953488372,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0.013333333333333334,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.014705882352941176,
0,
0,
0,
0.021739130434782608,
0,
0.020833333333333332,
0.021739130434782608,
0,
0.010101010101010102,
0.024691358024691357,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01818181818181818,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0.03636363636363636,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0.05714285714285714,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0.019230769230769232,
0.014925373134328358,
0.022727272727272728,
0.02564102564102564,
0.015873015873015872,
0.1111111111111111,
0,
0.03333333333333333,
0.012195121951219513,
0,
0.01639344262295082,
0.014285714285714285,
0,
0.02247191011235955,
0.02040816326530612,
0.024691358024691357,
0.02631578947368421,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0.021739130434782608,
0,
0,
0.011764705882352941,
0,
0.015873015873015872,
0.014084507042253521,
0,
0.014285714285714285,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
1
] | 304 | 0.008811 |
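sandwich_iter in the DualRetractProcessor above walks the gcode as (previous, current, next) triplets. The following is a minimal standalone sketch of that windowing pattern, not the class method itself; it uses Python 3's next() builtin, whereas the original calls iterator.next() because it targets Python 2.

def triplets(iterable):
    # Yield (previous, current, next) for each element; the first previous is None
    # and the final next is an empty string, mirroring sandwich_iter above.
    iterator = iter(iterable)
    current = next(iterator)
    prev = None
    for nxt in iterator:
        yield (prev, current, nxt)
        prev = current
        current = nxt
    yield (prev, current, '')

# Example:
# list(triplets(['G1 X0', 'M135 T1', 'G1 X5'])) ->
# [(None, 'G1 X0', 'M135 T1'), ('G1 X0', 'M135 T1', 'G1 X5'), ('M135 T1', 'G1 X5', '')]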
#!/usr/bin/python
import participantCollection
import re
import datetime
import pyperclip
currentMonthIndex = datetime.date.today().month
#TODO: need to figure out how to get total days in current month...
currentMonthTotalDays = 30
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[nextMonthIndex]
currentDayOfMonthIndex = datetime.date.today().day
# TODO: testing...
currentDayOfMonthIndex = 29
# TODO: more...
currentDayOfMonthName = {1:'first', 2:'second', 3:'third', 4:'fourth', 5:'fifth', 6:'sixth', 7:'seventh', 8:'eighth', 9:'ninth', 10:'tenth', 11:'eleventh', 12:'twelfth', 13:'thirteenth', 14:'fourteenth', 15:'fifteenth', 16:'sixteenth', 17:'seventeenth', 18:'eighteenth', 19:'nineteenth', 20:'twentieth', 21:'twenty-first', 22:'twenty-second', 23:'twenty-third', 24:'twenty-fourth', 25:'twenty-fifth', 26:'twenty-sixth', 27:'twenty-seventh', 28:'twenty-eighth', 29:'twenty-ninth', 30:'thirtieth', 31:'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}[datetime.date.today().weekday()]
participantCollection = participantCollection.ParticipantCollection()
numberStillIn = participantCollection.sizeOfParticipantsWhoAreStillIn()
initialNumber = participantCollection.size()
percentStillIn = int(round(100*numberStillIn/initialNumber,0))
# print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%** Here is the list of participants still with the challenge:\n"
def stringToPrintLegacy():
answer = "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer = re.sub( 'NUMBER_STILL_IN', str(numberStillIn), answer )
answer = re.sub( 'INITIAL_NUMBER', str(initialNumber), answer )
answer = re.sub( 'PERCENT_STILL_IN', str(percentStillIn), answer )
for participant in participantCollection.participantsWhoAreStillIn():
answer += "/u/" + participant.name
if not participant.hasCheckedIn:
answer += " ~"
answer += "\n\n"
return answer
def templateForParticipants():
answer = ""
for participant in participantCollection.participantsWhoAreStillIn():
answer += "/u/" + participant.name
if not participant.hasCheckedIn:
answer += " ~"
answer += "\n\n"
return answer
def templateForParticipantsOnFinalDay():
answer = ""
answer += "These participants have checked in at least once in the last 15 days:\n"
answer += "\n"
for participant in participantCollection.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name + "\n"
answer += "\n"
answer += "These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\n"
answer += "\n"
for participant in participantCollection.participantsWhoAreStillInAndHaveNotCheckedIn():
answer += "/u/" + participant.name + " ~\n"
answer += "\n"
return answer
def templateFor1():
print '1\n\n'
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. We will no longer be accepting new signups. Best of luck to everyone here!\n"
answer += "\n"
answer += "Here's how this thing works:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateFor2to9():
print '2 to 9\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor10to14():
print '10 to 14\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(15-currentDayOfMonthIndex) + " days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor15():
print '15\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor16toPenultimate():
print '16 to penultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I might re-add you.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateForUltimate():
print 'Ultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean: CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
#TODO: need to do the part where it lists the checked in and non-checked in participants separately.
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
answer += templateForParticipantsOnFinalDay()
return answer
def templateToUse():
# return stringToPrintLegacy()
if currentDayOfMonthIndex == 1:
return templateFor1()
#elif ( currentDayOfMonthIndex >= 2 ) and ( currentDayOfMonthIndex <= 9 ):
elif ( 2 <= currentDayOfMonthIndex <= 9 ):
return templateFor2to9()
#elif ( currentDayOfMonthIndex >= 10 ) and ( currentDayOfMonthIndex <= 14 ):
elif ( 10 <= currentDayOfMonthIndex <= 14 ):
return templateFor10to14()
if currentDayOfMonthIndex == 15:
return templateFor15()
#elif ( currentDayOfMonthIndex >= 16 ) and ( currentDayOfMonthIndex <= 14 ):
elif ( currentDayOfMonthIndex >= 16 ) and ( currentDayOfMonthIndex <= currentMonthPenultimateDayIndex ):
return templateFor16toPenultimate()
else:
return templateForUltimate()
def stringToPrint():
answer = templateToUse()
answer = re.sub( 'NUMBER_STILL_IN', str(numberStillIn), answer )
answer = re.sub( 'INITIAL_NUMBER', str(initialNumber), answer )
answer = re.sub( 'PERCENT_STILL_IN', str(percentStillIn), answer )
answer = re.sub( 'CURRENT_MONTH_INDEX', str(currentMonthIndex), answer )
answer = re.sub( 'CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays), answer )
answer = re.sub( 'CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex), answer )
answer = re.sub( 'CURRENT_MONTH_NAME', currentMonthName, answer )
answer = re.sub( 'NEXT_MONTH_INDEX', str(nextMonthIndex), answer )
answer = re.sub( 'NEXT_MONTH_NAME', nextMonthName, answer )
answer = re.sub( 'CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex), answer )
answer = re.sub( 'CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName, answer )
answer = re.sub( 'CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName, answer )
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
# print re.sub('FOO', 'there', 'hello FOO yall')
# for participant in participantCollection.participantsWhoAreStillIn():
# if participant.hasCheckedIn:
# print "/u/" + participant.name
# else:
# print "/u/" + participant.name + " ~"
# print ""
| [
"#!/usr/bin/python\n",
"import participantCollection\n",
"import re\n",
"import datetime\n",
"import pyperclip\n",
"\n",
"currentMonthIndex = datetime.date.today().month\n",
"#TODO: need to figure out how to get total days in current month...\n",
"currentMonthTotalDays = 30\n",
"currentMonthPenultimateDayIndex = currentMonthTotalDays - 1\n",
"currentMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[currentMonthIndex]\n",
"nextMonthIndex = currentMonthIndex % 12 + 1\n",
"nextMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[nextMonthIndex]\n",
"currentDayOfMonthIndex = datetime.date.today().day\n",
"# TODO: testing...\n",
"currentDayOfMonthIndex = 29\n",
"# TODO: more...\n",
"currentDayOfMonthName = {1:'first', 2:'second', 3:'third', 4:'fourth', 5:'fifth', 6:'sixth', 7:'seventh', 8:'eighth', 9:'ninth', 10:'tenth', 11:'eleventh', 12:'twelfth', 13:'thirteenth', 14:'fourteenth', 15:'fifteenth', 16:'sixteenth', 17:'seventeenth', 18:'eighteenth', 19:'nineteenth', 20:'twentieth', 21:'twenty-first', 22:'twenty-second', 23:'twenty-third', 24:'twenty-fourth', 25:'twenty-fifth', 26:'twenty-sixth', 27:'twenty-seventh', 28:'twenty-eighth', 29:'twenty-ninth', 30:'thirtieth', 31:'thirty-first'}[currentDayOfMonthIndex]\n",
"currentDayOfWeekName = {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}[datetime.date.today().weekday()]\n",
"\n",
"\n",
"participantCollection = participantCollection.ParticipantCollection()\n",
"numberStillIn = participantCollection.sizeOfParticipantsWhoAreStillIn()\n",
"initialNumber = participantCollection.size()\n",
"percentStillIn = int(round(100*numberStillIn/initialNumber,0))\n",
"\n",
"\n",
"# print \"There are currently **\" + str(numberStillIn) + \" out of \" + str(initialNumber) +\"** original participants. That's **\" + str(int(round(100*numberStillIn/initialNumber,0))) + \"%** Here is the list of participants still with the challenge:\\n\"\n",
"\n",
"def stringToPrintLegacy():\n",
" answer = \"There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\\n\\n\"\n",
" answer = re.sub( 'NUMBER_STILL_IN', str(numberStillIn), answer )\n",
" answer = re.sub( 'INITIAL_NUMBER', str(initialNumber), answer )\n",
" answer = re.sub( 'PERCENT_STILL_IN', str(percentStillIn), answer )\n",
" for participant in participantCollection.participantsWhoAreStillIn():\n",
" answer += \"/u/\" + participant.name\n",
" if not participant.hasCheckedIn:\n",
" answer += \" ~\"\n",
" answer += \"\\n\\n\"\n",
" return answer\n",
"\n",
"def templateForParticipants():\n",
" answer = \"\"\n",
" for participant in participantCollection.participantsWhoAreStillIn():\n",
" answer += \"/u/\" + participant.name\n",
" if not participant.hasCheckedIn:\n",
" answer += \" ~\"\n",
" answer += \"\\n\\n\"\n",
" return answer\n",
"\n",
"def templateForParticipantsOnFinalDay():\n",
" answer = \"\"\n",
"\n",
" answer += \"These participants have checked in at least once in the last 15 days:\\n\"\n",
" answer += \"\\n\"\n",
" for participant in participantCollection.participantsWhoAreStillInAndHaveCheckedIn():\n",
" answer += \"/u/\" + participant.name + \"\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\\n\"\n",
" answer += \"\\n\"\n",
" for participant in participantCollection.participantsWhoAreStillInAndHaveNotCheckedIn():\n",
" answer += \"/u/\" + participant.name + \" ~\\n\"\n",
" answer += \"\\n\"\n",
" return answer\n",
"\n",
"def templateFor1():\n",
" print '1\\n\\n'\n",
" answer = \"\"\n",
" print \"=============================================================\"\n",
" answer += \"**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. We will no longer be accepting new signups. Best of luck to everyone here!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Here's how this thing works:\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\\n\"\n",
" answer += \"- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\\n\"\n",
" answer += \"- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\\n\"\n",
" answer += '- If you have a \"~\" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\\n'\n",
" answer += \"- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Good luck!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Here are our **INITIAL_NUMBER** original participants:\\n\\n\"\n",
" answer += templateForParticipants()\n",
" print \"=============================================================\"\n",
" return answer\n",
"\n",
"def templateFor2to9():\n",
" print '2 to 9\\n\\n'\n",
" answer = \"\"\n",
" answer += \"**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Guidelines:\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\\n\"\n",
" answer += \"- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\\n\"\n",
" answer += \"- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\\n\"\n",
" answer += '- If you have a \"~\" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\\n'\n",
" answer += \"- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Good luck!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\\n\\n\"\n",
" answer += templateForParticipants()\n",
" return answer\n",
"\n",
"\n",
"def templateFor10to14():\n",
" print '10 to 14\\n\\n'\n",
" answer = \"\"\n",
" answer += \"**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"**THE COUNTDOWN: Attention everyone!** You have \" + str(15-currentDayOfMonthIndex) + \" days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Guidelines:\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\\n\"\n",
" answer += \"- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\\n\"\n",
" answer += \"- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\\n\"\n",
" answer += '- If you have a \"~\" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\\n'\n",
" answer += \"- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Good luck!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\\n\\n\"\n",
" answer += templateForParticipants()\n",
" return answer\n",
"\n",
"def templateFor15():\n",
" print '15\\n\\n'\n",
" answer = \"\"\n",
" answer += \"**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Guidelines:\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\\n\"\n",
" answer += \"- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\\n\"\n",
" answer += \"- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\\n\"\n",
" answer += '- If you have a \"~\" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\\n'\n",
" answer += \"- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Good luck!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\\n\\n\"\n",
" answer += templateForParticipants()\n",
" return answer\n",
"\n",
"def templateFor16toPenultimate():\n",
" print '16 to penultimate\\n\\n'\n",
" answer = \"\"\n",
" answer += \"**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I might re-add you.\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Guidelines:\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\\n\"\n",
" answer += \"- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\\n\"\n",
" answer += \"- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\\n\"\n",
" answer += '- If you have a \"~\" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\\n'\n",
" answer += \"- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"Good luck!\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\\n\\n\"\n",
" answer += templateForParticipants()\n",
" return answer\n",
"\n",
"def templateForUltimate():\n",
" print 'Ultimate\\n\\n'\n",
" answer = \"\"\n",
" answer += \"**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean: CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\\n\"\n",
" answer += \"\\n\"\n",
" answer += \"For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\\n\"\n",
" answer += \"\\n\"\n",
" #TODO: need to do the part where it lists the checked in and non-checked in participants separately.\n",
" answer += \"There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\\n\\n\"\n",
" answer += templateForParticipantsOnFinalDay()\n",
" return answer\n",
"\n",
"def templateToUse():\n",
" # return stringToPrintLegacy()\n",
" if currentDayOfMonthIndex == 1:\n",
" return templateFor1()\n",
" #elif ( currentDayOfMonthIndex >= 2 ) and ( currentDayOfMonthIndex <= 9 ):\n",
" elif ( 2 <= currentDayOfMonthIndex <= 9 ):\n",
" return templateFor2to9()\n",
" #elif ( currentDayOfMonthIndex >= 10 ) and ( currentDayOfMonthIndex <= 14 ):\n",
" elif ( 10 <= currentDayOfMonthIndex <= 14 ):\n",
" return templateFor10to14()\n",
" if currentDayOfMonthIndex == 15:\n",
" return templateFor15()\n",
" #elif ( currentDayOfMonthIndex >= 16 ) and ( currentDayOfMonthIndex <= 14 ):\n",
" elif ( currentDayOfMonthIndex >= 16 ) and ( currentDayOfMonthIndex <= currentMonthPenultimateDayIndex ):\n",
" return templateFor16toPenultimate()\n",
" else:\n",
" return templateForUltimate()\n",
"\n",
"def stringToPrint():\n",
" answer = templateToUse()\n",
" answer = re.sub( 'NUMBER_STILL_IN', str(numberStillIn), answer )\n",
" answer = re.sub( 'INITIAL_NUMBER', str(initialNumber), answer )\n",
" answer = re.sub( 'PERCENT_STILL_IN', str(percentStillIn), answer )\n",
" answer = re.sub( 'CURRENT_MONTH_INDEX', str(currentMonthIndex), answer )\n",
" answer = re.sub( 'CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays), answer )\n",
" answer = re.sub( 'CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex), answer )\n",
" answer = re.sub( 'CURRENT_MONTH_NAME', currentMonthName, answer )\n",
" answer = re.sub( 'NEXT_MONTH_INDEX', str(nextMonthIndex), answer )\n",
" answer = re.sub( 'NEXT_MONTH_NAME', nextMonthName, answer )\n",
" answer = re.sub( 'CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex), answer )\n",
" answer = re.sub( 'CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName, answer )\n",
" answer = re.sub( 'CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName, answer )\n",
" return answer\n",
"\n",
"outputString = stringToPrint()\n",
"print \"=============================================================\"\n",
"print outputString\n",
"print \"=============================================================\"\n",
"pyperclip.copy(outputString)\n",
"\n",
"# print re.sub('FOO', 'there', 'hello FOO yall')\n",
"# for participant in participantCollection.participantsWhoAreStillIn():\n",
"# if participant.hasCheckedIn:\n",
"# print \"/u/\" + participant.name\n",
"# else:\n",
"# print \"/u/\" + participant.name + \" ~\"\n",
"# print \"\"\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0.014705882352941176,
0,
0,
0.06914893617021277,
0,
0.07142857142857142,
0,
0,
0,
0,
0.059369202226345084,
0.053691275167785234,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0.00398406374501992,
0,
0,
0.005128205128205128,
0.028985507246376812,
0.029411764705882353,
0.028169014084507043,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0.011363636363636364,
0,
0.011111111111111112,
0,
0,
0.0043859649122807015,
0,
0.010752688172043012,
0,
0,
0,
0,
0.05,
0,
0,
0,
0.0036231884057971015,
0,
0,
0,
0.004694835680751174,
0.005208333333333333,
0.005076142131979695,
0.004132231404958678,
0.004608294930875576,
0,
0,
0,
0.005555555555555556,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0.004347826086956522,
0,
0,
0,
0.004694835680751174,
0.005208333333333333,
0.005076142131979695,
0.004132231404958678,
0.004608294930875576,
0,
0,
0,
0.005555555555555556,
0,
0.00510204081632653,
0,
0,
0,
0,
0,
0,
0,
0.004347826086956522,
0,
0.0035335689045936395,
0,
0,
0,
0.004694835680751174,
0.005208333333333333,
0.005076142131979695,
0.004132231404958678,
0.004608294930875576,
0,
0,
0,
0.005555555555555556,
0,
0.00510204081632653,
0,
0,
0,
0.047619047619047616,
0,
0,
0.004347826086956522,
0,
0.006024096385542169,
0,
0,
0,
0.004694835680751174,
0.005208333333333333,
0.005076142131979695,
0.004132231404958678,
0.004608294930875576,
0,
0,
0,
0.005555555555555556,
0,
0.00510204081632653,
0,
0,
0,
0.029411764705882353,
0,
0,
0.004347826086956522,
0,
0.0040650406504065045,
0,
0,
0,
0.004694835680751174,
0.005208333333333333,
0.005076142131979695,
0.0034482758620689655,
0.004608294930875576,
0,
0,
0,
0.005555555555555556,
0,
0.00510204081632653,
0,
0,
0,
0.037037037037037035,
0,
0,
0.0015527950310559005,
0,
0.005555555555555556,
0,
0.018867924528301886,
0.007352941176470588,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0.012658227848101266,
0.0425531914893617,
0,
0.024691358024691357,
0.04081632653061224,
0,
0,
0,
0.024691358024691357,
0.045871559633027525,
0,
0,
0,
0,
0.047619047619047616,
0,
0.028985507246376812,
0.029411764705882353,
0.028169014084507043,
0.025974025974025976,
0.03488372093023256,
0.028037383177570093,
0.02857142857142857,
0.028169014084507043,
0.03125,
0.033707865168539325,
0.036585365853658534,
0.025,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1
] | 239 | 0.010851 |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
Mobile IP.
"""
from scapy.fields import ByteEnumField, ByteField, IPField, LongField, \
ShortField, XByteField
from scapy.packet import Packet, bind_layers, bind_bottom_up
from scapy.layers.inet import IP, UDP
class MobileIP(Packet):
name = "Mobile IP (RFC3344)"
fields_desc = [ByteEnumField("type", 1, {1: "RRQ", 3: "RRP"})]
class MobileIPRRQ(Packet):
name = "Mobile IP Registration Request (RFC3344)"
fields_desc = [XByteField("flags", 0),
ShortField("lifetime", 180),
IPField("homeaddr", "0.0.0.0"),
IPField("haaddr", "0.0.0.0"),
IPField("coaddr", "0.0.0.0"),
LongField("id", 0), ]
class MobileIPRRP(Packet):
name = "Mobile IP Registration Reply (RFC3344)"
fields_desc = [ByteField("code", 0),
ShortField("lifetime", 180),
IPField("homeaddr", "0.0.0.0"),
IPField("haaddr", "0.0.0.0"),
LongField("id", 0), ]
class MobileIPTunnelData(Packet):
name = "Mobile IP Tunnel Data Message (RFC3519)"
fields_desc = [ByteField("nexthdr", 4),
ShortField("res", 0)]
bind_bottom_up(UDP, MobileIP, dport=434)
bind_bottom_up(UDP, MobileIP, sport=434)
bind_layers(UDP, MobileIP, sport=434, dport=434)
bind_layers(MobileIP, MobileIPRRQ, type=1)
bind_layers(MobileIP, MobileIPRRP, type=3)
bind_layers(MobileIP, MobileIPTunnelData, type=4)
bind_layers(MobileIPTunnelData, IP, nexthdr=4)
| [
"# This file is part of Scapy\n",
"# See http://www.secdev.org/projects/scapy for more information\n",
"# Copyright (C) Philippe Biondi <[email protected]>\n",
"# This program is published under a GPLv2 license\n",
"\n",
"\"\"\"\n",
"Mobile IP.\n",
"\"\"\"\n",
"\n",
"from scapy.fields import ByteEnumField, ByteField, IPField, LongField, \\\n",
" ShortField, XByteField\n",
"from scapy.packet import Packet, bind_layers, bind_bottom_up\n",
"from scapy.layers.inet import IP, UDP\n",
"\n",
"\n",
"class MobileIP(Packet):\n",
" name = \"Mobile IP (RFC3344)\"\n",
" fields_desc = [ByteEnumField(\"type\", 1, {1: \"RRQ\", 3: \"RRP\"})]\n",
"\n",
"\n",
"class MobileIPRRQ(Packet):\n",
" name = \"Mobile IP Registration Request (RFC3344)\"\n",
" fields_desc = [XByteField(\"flags\", 0),\n",
" ShortField(\"lifetime\", 180),\n",
" IPField(\"homeaddr\", \"0.0.0.0\"),\n",
" IPField(\"haaddr\", \"0.0.0.0\"),\n",
" IPField(\"coaddr\", \"0.0.0.0\"),\n",
" LongField(\"id\", 0), ]\n",
"\n",
"\n",
"class MobileIPRRP(Packet):\n",
" name = \"Mobile IP Registration Reply (RFC3344)\"\n",
" fields_desc = [ByteField(\"code\", 0),\n",
" ShortField(\"lifetime\", 180),\n",
" IPField(\"homeaddr\", \"0.0.0.0\"),\n",
" IPField(\"haaddr\", \"0.0.0.0\"),\n",
" LongField(\"id\", 0), ]\n",
"\n",
"\n",
"class MobileIPTunnelData(Packet):\n",
" name = \"Mobile IP Tunnel Data Message (RFC3519)\"\n",
" fields_desc = [ByteField(\"nexthdr\", 4),\n",
" ShortField(\"res\", 0)]\n",
"\n",
"\n",
"bind_bottom_up(UDP, MobileIP, dport=434)\n",
"bind_bottom_up(UDP, MobileIP, sport=434)\n",
"bind_layers(UDP, MobileIP, sport=434, dport=434)\n",
"bind_layers(MobileIP, MobileIPRRQ, type=1)\n",
"bind_layers(MobileIP, MobileIPRRP, type=3)\n",
"bind_layers(MobileIP, MobileIPTunnelData, type=4)\n",
"bind_layers(MobileIPTunnelData, IP, nexthdr=4)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 52 | 0 |
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# kenwaldek MIT-license
# Title: PyQt5 lesson 8 Version: 1.0
# Date: 08-01-17 Language: python3
# Description: pyqt5 gui messageBox quit application
# pythonprogramming.net from PyQt4 to PyQt5
###############################################################
# do something
import sys
from PyQt5.QtCore import QCoreApplication, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QPushButton, QAction, QMessageBox
from PyQt5.QtWidgets import QCheckBox
class window(QMainWindow):
def __init__(self):
super(window, self).__init__()
self.setGeometry(50, 50, 500, 300)
self.setWindowTitle('pyqt5 Tut')
# self.setWindowIcon(QIcon('pic.png'))
extractAction = QAction('&Get to the choppah', self)
extractAction.setShortcut('Ctrl+Q')
extractAction.setStatusTip('leave the app')
extractAction.triggered.connect(self.close_application)
self.statusBar()
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu('&File')
fileMenu.addAction(extractAction)
extractAction = QAction(QIcon('pic.png'), 'flee the scene', self)
extractAction.triggered.connect(self.close_application)
self.toolBar = self.addToolBar('extraction')
self.toolBar.addAction(extractAction)
self.home()
def home(self):
btn = QPushButton('quit', self)
btn.clicked.connect(self.close_application)
btn.resize(btn.sizeHint())
btn.move(0, 100)
checkBox = QCheckBox('Enlarge window', self)
        # checkBox.toggle() # uncomment if you want the box to start checked
checkBox.move(0, 50)
checkBox.stateChanged.connect(self.enlarge_window)
self.show()
def enlarge_window(self, state):
if state == Qt.Checked:
self.setGeometry(50, 50, 1000, 600)
else:
self.setGeometry(50, 50 , 500, 300)
def close_application(self):
choice = QMessageBox.question(self, 'Message',
"Are you sure to quit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if choice == QMessageBox.Yes:
print('quit application')
sys.exit()
else:
pass
if __name__ == "__main__": # had to add this otherwise app crashed
def run():
app = QApplication(sys.argv)
Gui = window()
sys.exit(app.exec_())
run()
| [
"#! /usr/bin/env python3\n",
"# -*- coding:utf-8 -*-\n",
"###############################################################\n",
"# kenwaldek MIT-license\n",
"\n",
"# Title: PyQt5 lesson 8 Version: 1.0\n",
"# Date: 08-01-17 Language: python3\n",
"# Description: pyqt5 gui messageBox quit application\n",
"# pythonprogramming.net from PyQt4 to PyQt5\n",
"###############################################################\n",
"\n",
"# do something\n",
"import sys\n",
"from PyQt5.QtCore import QCoreApplication, Qt\n",
"from PyQt5.QtGui import QIcon\n",
"from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QPushButton, QAction, QMessageBox\n",
"from PyQt5.QtWidgets import QCheckBox\n",
"\n",
"\n",
"\n",
"class window(QMainWindow):\n",
"\n",
" def __init__(self):\n",
" super(window, self).__init__()\n",
" self.setGeometry(50, 50, 500, 300)\n",
" self.setWindowTitle('pyqt5 Tut')\n",
" # self.setWindowIcon(QIcon('pic.png'))\n",
"\n",
" extractAction = QAction('&Get to the choppah', self)\n",
" extractAction.setShortcut('Ctrl+Q')\n",
" extractAction.setStatusTip('leave the app')\n",
" extractAction.triggered.connect(self.close_application)\n",
"\n",
" self.statusBar()\n",
"\n",
" mainMenu = self.menuBar()\n",
" fileMenu = mainMenu.addMenu('&File')\n",
" fileMenu.addAction(extractAction)\n",
"\n",
" extractAction = QAction(QIcon('pic.png'), 'flee the scene', self)\n",
" extractAction.triggered.connect(self.close_application)\n",
"\n",
" self.toolBar = self.addToolBar('extraction')\n",
" self.toolBar.addAction(extractAction)\n",
"\n",
" self.home()\n",
"\n",
" def home(self):\n",
" btn = QPushButton('quit', self)\n",
" btn.clicked.connect(self.close_application)\n",
" btn.resize(btn.sizeHint())\n",
" btn.move(0, 100)\n",
"\n",
" checkBox = QCheckBox('Enlarge window', self)\n",
" # checkBox.toggle() # if you want to be checked in in the begin\n",
" checkBox.move(0, 50)\n",
" checkBox.stateChanged.connect(self.enlarge_window)\n",
"\n",
"\n",
" self.show()\n",
"\n",
" def enlarge_window(self, state):\n",
" if state == Qt.Checked:\n",
" self.setGeometry(50, 50, 1000, 600)\n",
" else:\n",
" self.setGeometry(50, 50 , 500, 300)\n",
"\n",
"\n",
" def close_application(self):\n",
"\n",
" choice = QMessageBox.question(self, 'Message',\n",
" \"Are you sure to quit?\", QMessageBox.Yes |\n",
" QMessageBox.No, QMessageBox.No)\n",
"\n",
" if choice == QMessageBox.Yes:\n",
" print('quit application')\n",
" sys.exit()\n",
" else:\n",
" pass\n",
"\n",
"\n",
"if __name__ == \"__main__\": # had to add this otherwise app crashed\n",
"\n",
" def run():\n",
" app = QApplication(sys.argv)\n",
" Gui = window()\n",
" sys.exit(app.exec_())\n",
"\n",
"run()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0.030303030303030304,
0,
0,
0.0125,
0.014492753623188406,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 89 | 0.00197 |
import os
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.contrib.auth.models import User
import json
from .managers import BearerTokenManager
class Provider(models.Model):
"""
A resource provider like Facebook, Google Drive, Twitter, ...
It provides OAuth 2.0 protected resources.
Any bearer (like Moogle) can access its protected resources using bearer tokens.
"""
# `name` is a choice field because we only allow a pre-defined set of providers to be added.
# Ideally each provider should have been a model, like FacebookProvider, TwitterProvider, ...
# but they are all alike, no difference at all, so we decided to use only a model.
# The choice field is made of a machine-friendly name
# We could have added a `name_verbose` field for a descriptive user-friendly name but we
# decided that the choice field can handle this. Descriptive names never change so we don't
    # see the point in adding a new field to the database.
# Machine-friendly names, only lowercase and - (no space)
NAME_DRIVE = 'drive'
NAME_GMAIL = 'gmail'
NAME_FACEBOOK = 'facebook'
NAME_TWITTER = 'twitter'
NAME_DROPBOX = 'dropbox'
# Now build `NAME_CHOICES` with tuples made of:
# (machine-friendly name, user-friendly name)
NAME_CHOICES = (
(NAME_DRIVE, 'Google Drive'),
(NAME_GMAIL, 'Google Gmail'),
(NAME_FACEBOOK, 'Facebook'),
(NAME_TWITTER, 'Twitter'),
(NAME_DROPBOX, 'Dropbox'),
)
# For `name` choice field:
# provider.name returns the machine-friendly name
# provider.get_name_display() returns the user-friendly name
name = models.CharField(max_length=20, choices=NAME_CHOICES, unique=True)
redirect_url = models.CharField(max_length=200) # Relative url like /tokens/add/drive/callback
authorization_base_url = models.URLField()
token_url = models.URLField()
request_token_url = models.URLField(blank=True) # Used only in Oauth1
oauth_version = models.CharField(max_length=5, blank=True) # e.g. 2 or 1.0a
_scope = models.TextField(blank=True) # some providers like Dropbox have no scope
# `_scope` is json text containing a serialized list like:
# ["https://www.googleapis.com/auth/userinfo.email", "https://mail.google.com"]
# A getter and a setter property are defined on this field to automatize the conversion
# from json text to python objects.
#
# `_scope` has a getter and a setter property to automatize the conversion from json text
# to python list.
@property
def scope(self):
"""
Getter property for _scope to automatize the conversion from json text to python objects.
Read a json string from the db and return a python list.
"""
try:
return json.loads(self._scope)
except ValueError:
return None
@scope.setter
def scope(self, value):
"""
Setter property for _scope to automatize the conversion from json text to python objects.
Receive a python list and store a json string to the db.
"""
self._scope = json.dumps(value, indent=4)
@property
def client_id(self):
"""
Getter property for <PROVIDER_NAME>_CLIENT_ID environment variable.
"""
try:
var_name = "{}_CLIENT_ID".format(self.name.upper())
return os.environ[var_name]
except KeyError:
msg = "You must set the environment variable: {}".format(var_name)
raise ImproperlyConfigured(msg)
@property
def client_secret(self):
"""
Getter property for <PROVIDER_NAME>_CLIENT_SECRET environment variable.
"""
try:
var_name = "{}_CLIENT_SECRET".format(self.name.upper())
return os.environ[var_name]
except KeyError:
msg = "You must set the environment variable: {}".format(var_name)
raise ImproperlyConfigured(msg)
def __str__(self):
return "{}".format(self.name)
class BearerToken(models.Model):
"""
Token that a bearer like Moogle and Magpie can use to get access to OAuth 2.0 protected
resources. A protected resource belongs to a resource owner (user): it can be a Facebook
profile, Google Drive documents, tweets on Twitter, ...
"""
# `user` is the resource owner
user = models.ForeignKey(User)
provider = models.ForeignKey(Provider)
_token_set = models.TextField()
objects = BearerTokenManager()
# `_token_set` is json text containing a serialized dictionary like:
#{
# "refresh_token": "1/UTY6FA......XHCSIvhm1dghJHHG678",
# "expires_in": 3600,
# "token_type": "Bearer",
# "access_token": "ya29.1.AADtN_VwezbeOQGkJE4_3ZDNZimrRf86Dn...pL8YB1rpVRhav0-mIiHEmV8",
# "id_token": "eyJhbGciOiJSUzI1NiIsI...U3MWJlNZoempIreV572mbxH7Rm90eNQwfShPQnI49u8bZgc"
#}
    # This example is an OAuth2 token (from Google) but it can also be an OAuth1 token (like Twitter).
# A getter and a setter property are defined on this field to automatize the conversion
# from json text to python objects.
#
# Getter and setter properties for _token_set to automatize the conversion from json text
# to python objects.
# The getter reads a json string from the db and returns a python dictionary.
@property
def token_set(self):
"""
Getter property for `_token_set` to automatize the conversion from json text to python
objects. Read a json string from the db and return a python dictionary.
"""
try:
return json.loads(self._token_set)
except ValueError:
return None
@token_set.setter
def token_set(self, value):
"""
Setter property for `_token_set` to automatize the conversion from json text to python
objects. Receive a python dictionary and store a json string to the db.
"""
self._token_set = json.dumps(value, indent=4)
@property
def access_token(self):
"""
Getter property for the access_token stored in `token_set` dictionary.
"""
return self.token_set.get('access_token', '')
@property
def refresh_token(self):
"""
Getter property for the refresh_token stored in `token_set` dictionary.
"""
return self.token_set.get('refresh_token', '')
class Meta:
unique_together = ("user", "provider")
def __str__(self):
return "{}, {}".format(
self.provider.get_name_display(),
self.user.get_full_name() or self.user.get_username()
) | [
"import os\n",
"from django.core.exceptions import ImproperlyConfigured\n",
"from django.db import models\n",
"from django.contrib.auth.models import User\n",
"import json\n",
"from .managers import BearerTokenManager\n",
"\n",
"\n",
"class Provider(models.Model):\n",
" \"\"\"\n",
" A resource provider like Facebook, Google Drive, Twitter, ...\n",
" It provides OAuth 2.0 protected resources.\n",
" Any bearer (like Moogle) can access its protected resources using bearer tokens.\n",
" \"\"\"\n",
" # `name` is a choice field because we only allow a pre-defined set of providers to be added.\n",
" # Ideally each provider should have been a model, like FacebookProvider, TwitterProvider, ...\n",
" # but they are all alike, no difference at all, so we decided to use only a model.\n",
" # The choice field is made of a machine-friendly name\n",
" # We could have added a `name_verbose` field for a descriptive user-friendly name but we\n",
" # decided that the choice field can handle this. Descriptive names never change so we don't\n",
" # see the point in adding a new filed to the database.\n",
" # Machine-friendly names, only lowercase and - (no space)\n",
" NAME_DRIVE = 'drive'\n",
" NAME_GMAIL = 'gmail'\n",
" NAME_FACEBOOK = 'facebook'\n",
" NAME_TWITTER = 'twitter'\n",
" NAME_DROPBOX = 'dropbox'\n",
" # Now build `NAME_CHOICES` with tuples made of:\n",
" # (machine-friendly name, user-friendly name)\n",
" NAME_CHOICES = (\n",
" (NAME_DRIVE, 'Google Drive'),\n",
" (NAME_GMAIL, 'Google Gmail'),\n",
" (NAME_FACEBOOK, 'Facebook'),\n",
" (NAME_TWITTER, 'Twitter'),\n",
" (NAME_DROPBOX, 'Dropbox'),\n",
" )\n",
" # For `name` choice field:\n",
" # provider.name returns the machine-friendly name\n",
" # provider.get_name_display() returns the user-friendly name\n",
" name = models.CharField(max_length=20, choices=NAME_CHOICES, unique=True)\n",
" redirect_url = models.CharField(max_length=200) # Relative url like /tokens/add/drive/callback\n",
" authorization_base_url = models.URLField()\n",
" token_url = models.URLField()\n",
" request_token_url = models.URLField(blank=True) # Used only in Oauth1\n",
" oauth_version = models.CharField(max_length=5, blank=True) # e.g. 2 or 1.0a\n",
" _scope = models.TextField(blank=True) # some providers like Dropbox have no scope\n",
"\n",
" # `_scope` is json text containing a serialized list like:\n",
" # [\"https://www.googleapis.com/auth/userinfo.email\", \"https://mail.google.com\"]\n",
" # A getter and a setter property are defined on this field to automatize the conversion\n",
" # from json text to python objects.\n",
" #\n",
" # `_scope` has a getter and a setter property to automatize the conversion from json text\n",
" # to python list.\n",
" @property\n",
" def scope(self):\n",
" \"\"\"\n",
" Getter property for _scope to automatize the conversion from json text to python objects.\n",
" Read a json string from the db and return a python list.\n",
" \"\"\"\n",
" try:\n",
" return json.loads(self._scope)\n",
" except ValueError:\n",
" return None\n",
"\n",
" @scope.setter\n",
" def scope(self, value):\n",
" \"\"\"\n",
" Setter property for _scope to automatize the conversion from json text to python objects.\n",
" Receive a python list and store a json string to the db.\n",
" \"\"\"\n",
" self._scope = json.dumps(value, indent=4)\n",
"\n",
" @property\n",
" def client_id(self):\n",
" \"\"\"\n",
" Getter property for <PROVIDER_NAME>_CLIENT_ID environment variable.\n",
" \"\"\"\n",
" try:\n",
" var_name = \"{}_CLIENT_ID\".format(self.name.upper())\n",
" return os.environ[var_name]\n",
" except KeyError:\n",
" msg = \"You must set the environment variable: {}\".format(var_name)\n",
" raise ImproperlyConfigured(msg)\n",
"\n",
" @property\n",
" def client_secret(self):\n",
" \"\"\"\n",
" Getter property for <PROVIDER_NAME>_CLIENT_SECRET environment variable.\n",
" \"\"\"\n",
" try:\n",
" var_name = \"{}_CLIENT_SECRET\".format(self.name.upper())\n",
" return os.environ[var_name]\n",
" except KeyError:\n",
" msg = \"You must set the environment variable: {}\".format(var_name)\n",
" raise ImproperlyConfigured(msg)\n",
"\n",
" def __str__(self):\n",
" return \"{}\".format(self.name)\n",
"\n",
"\n",
"class BearerToken(models.Model):\n",
" \"\"\"\n",
" Token that a bearer like Moogle and Magpie can use to get access to OAuth 2.0 protected\n",
" resources. A protected resource belongs to a resource owner (user): it can be a Facebook\n",
" profile, Google Drive documents, tweets on Twitter, ...\n",
" \"\"\"\n",
" # `user` is the resource owner\n",
" user = models.ForeignKey(User)\n",
" provider = models.ForeignKey(Provider)\n",
" _token_set = models.TextField()\n",
"\n",
" objects = BearerTokenManager()\n",
"\n",
" # `_token_set` is json text containing a serialized dictionary like:\n",
" #{\n",
" # \"refresh_token\": \"1/UTY6FA......XHCSIvhm1dghJHHG678\",\n",
" # \"expires_in\": 3600,\n",
" # \"token_type\": \"Bearer\",\n",
" # \"access_token\": \"ya29.1.AADtN_VwezbeOQGkJE4_3ZDNZimrRf86Dn...pL8YB1rpVRhav0-mIiHEmV8\",\n",
" # \"id_token\": \"eyJhbGciOiJSUzI1NiIsI...U3MWJlNZoempIreV572mbxH7Rm90eNQwfShPQnI49u8bZgc\"\n",
" #}\n",
" # This example is a OAuth2 token (from Google) but it can be also OAuth1 token (like Twitter).\n",
" # A getter and a setter property are defined on this field to automatize the conversion\n",
" # from json text to python objects.\n",
" #\n",
" # Getter and setter properties for _token_set to automatize the conversion from json text\n",
" # to python objects.\n",
" # The getter reads a json string from the db and returns a python dictionary.\n",
" @property\n",
" def token_set(self):\n",
" \"\"\"\n",
" Getter property for `_token_set` to automatize the conversion from json text to python\n",
" objects. Read a json string from the db and return a python dictionary.\n",
" \"\"\"\n",
" try:\n",
" return json.loads(self._token_set)\n",
" except ValueError:\n",
" return None\n",
"\n",
" @token_set.setter\n",
" def token_set(self, value):\n",
" \"\"\"\n",
" Setter property for `_token_set` to automatize the conversion from json text to python\n",
" objects. Receive a python dictionary and store a json string to the db.\n",
" \"\"\"\n",
" self._token_set = json.dumps(value, indent=4)\n",
"\n",
" @property\n",
" def access_token(self):\n",
" \"\"\"\n",
" Getter property for the access_token stored in `token_set` dictionary.\n",
" \"\"\"\n",
" return self.token_set.get('access_token', '')\n",
"\n",
" @property\n",
" def refresh_token(self):\n",
" \"\"\"\n",
" Getter property for the refresh_token stored in `token_set` dictionary.\n",
" \"\"\"\n",
" return self.token_set.get('refresh_token', '')\n",
"\n",
" class Meta:\n",
" unique_together = (\"user\", \"provider\")\n",
"\n",
" def __str__(self):\n",
" return \"{}, {}\".format(\n",
" self.provider.get_name_display(),\n",
" self.user.get_full_name() or self.user.get_username()\n",
" )"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0.010309278350515464,
0.01020408163265306,
0.011494252873563218,
0,
0.010752688172043012,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01,
0,
0,
0,
0.012345679012345678,
0.011494252873563218,
0,
0,
0.011904761904761904,
0.010869565217391304,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.14285714285714285,
0,
0,
0,
0.010416666666666666,
0.010526315789473684,
0.14285714285714285,
0.010101010101010102,
0.010869565217391304,
0,
0,
0.010638297872340425,
0,
0.012195121951219513,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111
] | 170 | 0.003864 |
#coding=utf8
import tushare as ts;
import pymysql;
import time as dt
from datashape.coretypes import string
from pandas.io.sql import SQLDatabase
import sqlalchemy
import datetime
from sqlalchemy import create_engine
from pandas.io import sql
import threading
import pandas as pd;
import sys
sys.path.append('../') # add config file path
from common_function import *
def load_data():
    # download basic company info, including stock codes, PE ratio and other data
try:
rs=ts.new_stocks()
pd.DataFrame.to_sql(rs, table_name, con=conn , flavor='mysql', if_exists='replace',index=True)
except Exception as e:
print(e.message)
print("加载新股数据出错")
if __name__ == '__main__':
    #-------------------- basic settings ---------------------------------
print("--------------加载新股数据-----------------------------")
startTime=dt.time()
iphost,user,passwd=get_mysql_conn()
db='ods_data'
charset='utf8'
table_name='ods_invest_refer_new_stocks'
conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)
    #-------------------- script execution starts --------------------------------
load_data()
endTime=dt.time()
print("---------------脚本运行完毕,共计耗费时间%sS------------------"%(endTime-startTime))
| [
"#coding=utf8\n",
"\n",
"import tushare as ts;\n",
"import pymysql;\n",
"import time as dt\n",
"from datashape.coretypes import string\n",
"from pandas.io.sql import SQLDatabase\n",
"import sqlalchemy\n",
"import datetime\n",
"from sqlalchemy import create_engine\n",
"from pandas.io import sql\n",
"import threading\n",
"import pandas as pd;\n",
"import sys\n",
"sys.path.append('../') #添加配置文件\n",
"from common_function import *\n",
"\n",
"def load_data():\n",
" #下载公司基本信息,包括股票代码、pe、市盈率等数据\n",
" try:\n",
" rs=ts.new_stocks()\n",
" pd.DataFrame.to_sql(rs, table_name, con=conn , flavor='mysql', if_exists='replace',index=True)\n",
" except Exception as e:\n",
" print(e.message)\n",
" print(\"加载新股数据出错\")\n",
"\n",
"if __name__ == '__main__':\n",
" #--------------------设置基本信息---------------------------------\n",
" print(\"--------------加载新股数据-----------------------------\")\n",
" startTime=dt.time()\n",
" iphost,user,passwd=get_mysql_conn()\n",
" db='ods_data'\n",
" charset='utf8'\n",
" table_name='ods_invest_refer_new_stocks'\n",
" conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)\n",
" #--------------------脚本运行开始--------------------------------\n",
" load_data()\n",
" endTime=dt.time()\n",
" print(\"---------------脚本运行完毕,共计耗费时间%sS------------------\"%(endTime-startTime))\n"
] | [
0.07692307692307693,
0,
0.045454545454545456,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0.06451612903225806,
0.0967741935483871,
0,
0.058823529411764705,
0.05714285714285714,
0,
0.037037037037037035,
0.02912621359223301,
0,
0,
0,
0,
0.037037037037037035,
0.015384615384615385,
0,
0.041666666666666664,
0.075,
0.05555555555555555,
0.05263157894736842,
0.022222222222222223,
0.03409090909090909,
0.015625,
0,
0.045454545454545456,
0.024096385542168676
] | 39 | 0.025505 |
"""Tests for the lsstprojectmeta.tex.normalizer module."""
from __future__ import annotations
import re
from pathlib import Path
import pytest
from lander.ext.parser.texutils.normalize import (
input_include_pattern,
read_tex_file,
remove_comments,
remove_trailing_whitespace,
replace_macros,
)
def test_remove_comments_abstract() -> None:
sample = (
r"\setDocAbstract{%" + "\n"
" The LSST Data Management System (DMS) is a set of services\n"
" employing a variety of software components running on\n"
" computational and networking infrastructure that combine to\n"
" deliver science data products to the observatory's users and\n"
" support observatory operations. This document describes the\n"
" components, their service instances, and their deployment\n"
" environments as well as the interfaces among them, the rest\n"
" of the LSST system, and the outside world.\n"
"}"
)
expected = (
r"\setDocAbstract{" + "\n"
" The LSST Data Management System (DMS) is a set of services\n"
" employing a variety of software components running on\n"
" computational and networking infrastructure that combine to\n"
" deliver science data products to the observatory's users and\n"
" support observatory operations. This document describes the\n"
" components, their service instances, and their deployment\n"
" environments as well as the interfaces among them, the rest\n"
" of the LSST system, and the outside world.\n"
"}"
)
assert remove_comments(sample) == expected
def test_escaped_remove_comments() -> None:
"""Test remove_comments where a "%" is escaped."""
sample = r"The uncertainty is 5\%. % a comment"
expected = r"The uncertainty is 5\%. "
assert remove_comments(sample) == expected
def test_single_line_remove_comments() -> None:
sample = "This is content. % a comment"
expected = "This is content. "
assert remove_comments(sample) == expected
def test_remove_single_line_trailing_whitespace() -> None:
sample = "This is content. "
expected = "This is content."
assert remove_trailing_whitespace(sample) == expected
def test_multi_line_trailing_whitespace() -> None:
sample = "First line. \n" "Second line. "
expected = "First line.\n" "Second line."
assert remove_trailing_whitespace(sample) == expected
def test_read_tex_file() -> None:
project_dir = Path(__file__).parent / "data" / "texinputs"
root_filepath = project_dir / "LDM-nnn.tex"
tex_source = read_tex_file(root_filepath)
# verify that input'd and include'd content is present
assert re.search(r"\\setDocAbstract", tex_source) is not None
assert re.search(r"\\section{Introduction}", tex_source) is not None
def test_replace_macros() -> None:
sample = (
r"\def \product {Data Management}" + "\n"
r"\title [Test Plan] { \product\ Test Plan}" + "\n"
r"\setDocAbstract {" + "\n"
r"This is the Test Plan for \product.}"
)
expected = (
r"\def Data Management {Data Management}" + "\n"
r"\title [Test Plan] { Data Management Test Plan}" + "\n"
r"\setDocAbstract {" + "\n"
r"This is the Test Plan for Data Management.}"
)
macros = {r"\product": "Data Management"}
tex_source = replace_macros(sample, macros)
assert re.search(r"\\product", sample) is not None # sanity check
assert re.search(r"\\product", tex_source) is None
assert tex_source == expected
@pytest.mark.parametrize(
"sample,expected",
[
(r"\input{file.tex}", "file.tex"),
(r"\input{dirname/file.tex}", "dirname/file.tex"),
(r"\input {file}%", "file"),
(r"\input file%", "file"),
(r"\input file" + "\n", "file"),
(r"\input file " + "\n", "file"),
(r"\include{file.tex}", "file.tex"),
(r"\include{dirname/file.tex}", "dirname/file.tex"),
(r"\include {file}%", "file"),
(r"\include file%", "file"),
(r"\include file" + "\n", "file"),
(r"\include file" + " \n", "file"),
],
)
def test_input_include_pattern(sample: str, expected: str) -> None:
match = re.search(input_include_pattern, sample)
assert match is not None
assert match.group("filename") == expected
def test_non_inputs() -> None:
r"""Test for patterns like ``\inputData{XYZ}`` that have in the past been
detected as an ``\input`` command.
"""
sample = r"\newcommand{\inputData}[1]{\texttt{#1}}"
match = re.search(input_include_pattern, sample)
assert match is None
| [
"\"\"\"Tests for the lsstprojectmeta.tex.normalizer module.\"\"\"\n",
"\n",
"from __future__ import annotations\n",
"\n",
"import re\n",
"from pathlib import Path\n",
"\n",
"import pytest\n",
"\n",
"from lander.ext.parser.texutils.normalize import (\n",
" input_include_pattern,\n",
" read_tex_file,\n",
" remove_comments,\n",
" remove_trailing_whitespace,\n",
" replace_macros,\n",
")\n",
"\n",
"\n",
"def test_remove_comments_abstract() -> None:\n",
" sample = (\n",
" r\"\\setDocAbstract{%\" + \"\\n\"\n",
" \" The LSST Data Management System (DMS) is a set of services\\n\"\n",
" \" employing a variety of software components running on\\n\"\n",
" \" computational and networking infrastructure that combine to\\n\"\n",
" \" deliver science data products to the observatory's users and\\n\"\n",
" \" support observatory operations. This document describes the\\n\"\n",
" \" components, their service instances, and their deployment\\n\"\n",
" \" environments as well as the interfaces among them, the rest\\n\"\n",
" \" of the LSST system, and the outside world.\\n\"\n",
" \"}\"\n",
" )\n",
" expected = (\n",
" r\"\\setDocAbstract{\" + \"\\n\"\n",
" \" The LSST Data Management System (DMS) is a set of services\\n\"\n",
" \" employing a variety of software components running on\\n\"\n",
" \" computational and networking infrastructure that combine to\\n\"\n",
" \" deliver science data products to the observatory's users and\\n\"\n",
" \" support observatory operations. This document describes the\\n\"\n",
" \" components, their service instances, and their deployment\\n\"\n",
" \" environments as well as the interfaces among them, the rest\\n\"\n",
" \" of the LSST system, and the outside world.\\n\"\n",
" \"}\"\n",
" )\n",
" assert remove_comments(sample) == expected\n",
"\n",
"\n",
"def test_escaped_remove_comments() -> None:\n",
" \"\"\"Test remove_comments where a \"%\" is escaped.\"\"\"\n",
" sample = r\"The uncertainty is 5\\%. % a comment\"\n",
" expected = r\"The uncertainty is 5\\%. \"\n",
" assert remove_comments(sample) == expected\n",
"\n",
"\n",
"def test_single_line_remove_comments() -> None:\n",
" sample = \"This is content. % a comment\"\n",
" expected = \"This is content. \"\n",
" assert remove_comments(sample) == expected\n",
"\n",
"\n",
"def test_remove_single_line_trailing_whitespace() -> None:\n",
" sample = \"This is content. \"\n",
" expected = \"This is content.\"\n",
" assert remove_trailing_whitespace(sample) == expected\n",
"\n",
"\n",
"def test_multi_line_trailing_whitespace() -> None:\n",
" sample = \"First line. \\n\" \"Second line. \"\n",
" expected = \"First line.\\n\" \"Second line.\"\n",
" assert remove_trailing_whitespace(sample) == expected\n",
"\n",
"\n",
"def test_read_tex_file() -> None:\n",
" project_dir = Path(__file__).parent / \"data\" / \"texinputs\"\n",
" root_filepath = project_dir / \"LDM-nnn.tex\"\n",
" tex_source = read_tex_file(root_filepath)\n",
"\n",
" # verify that input'd and include'd content is present\n",
" assert re.search(r\"\\\\setDocAbstract\", tex_source) is not None\n",
" assert re.search(r\"\\\\section{Introduction}\", tex_source) is not None\n",
"\n",
"\n",
"def test_replace_macros() -> None:\n",
" sample = (\n",
" r\"\\def \\product {Data Management}\" + \"\\n\"\n",
" r\"\\title [Test Plan] { \\product\\ Test Plan}\" + \"\\n\"\n",
" r\"\\setDocAbstract {\" + \"\\n\"\n",
" r\"This is the Test Plan for \\product.}\"\n",
" )\n",
"\n",
" expected = (\n",
" r\"\\def Data Management {Data Management}\" + \"\\n\"\n",
" r\"\\title [Test Plan] { Data Management Test Plan}\" + \"\\n\"\n",
" r\"\\setDocAbstract {\" + \"\\n\"\n",
" r\"This is the Test Plan for Data Management.}\"\n",
" )\n",
"\n",
" macros = {r\"\\product\": \"Data Management\"}\n",
" tex_source = replace_macros(sample, macros)\n",
" assert re.search(r\"\\\\product\", sample) is not None # sanity check\n",
" assert re.search(r\"\\\\product\", tex_source) is None\n",
" assert tex_source == expected\n",
"\n",
"\n",
"@pytest.mark.parametrize(\n",
" \"sample,expected\",\n",
" [\n",
" (r\"\\input{file.tex}\", \"file.tex\"),\n",
" (r\"\\input{dirname/file.tex}\", \"dirname/file.tex\"),\n",
" (r\"\\input {file}%\", \"file\"),\n",
" (r\"\\input file%\", \"file\"),\n",
" (r\"\\input file\" + \"\\n\", \"file\"),\n",
" (r\"\\input file \" + \"\\n\", \"file\"),\n",
" (r\"\\include{file.tex}\", \"file.tex\"),\n",
" (r\"\\include{dirname/file.tex}\", \"dirname/file.tex\"),\n",
" (r\"\\include {file}%\", \"file\"),\n",
" (r\"\\include file%\", \"file\"),\n",
" (r\"\\include file\" + \"\\n\", \"file\"),\n",
" (r\"\\include file\" + \" \\n\", \"file\"),\n",
" ],\n",
")\n",
"def test_input_include_pattern(sample: str, expected: str) -> None:\n",
" match = re.search(input_include_pattern, sample)\n",
" assert match is not None\n",
" assert match.group(\"filename\") == expected\n",
"\n",
"\n",
"def test_non_inputs() -> None:\n",
" r\"\"\"Test for patterns like ``\\inputData{XYZ}`` that have in the past been\n",
" detected as an ``\\input`` command.\n",
" \"\"\"\n",
" sample = r\"\\newcommand{\\inputData}[1]{\\texttt{#1}}\"\n",
" match = re.search(input_include_pattern, sample)\n",
" assert match is None\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 133 | 0 |
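The tests above pin down the behaviour expected of the normalization helpers: a comment starts at an unescaped "%" and runs to the end of the line, an escaped "\%" is kept, and trailing spaces are stripped per line. A rough, regex-based sketch consistent with those tests is shown below; it is illustrative only, and the actual lander.ext.parser.texutils.normalize implementations may differ.

# Illustrative sketch only -- not the actual lander implementation.
import re

def remove_comments(tex_source):
    # Cut from an unescaped "%" to the end of each line; "\%" survives.
    return re.sub(r"(?<!\\)%.*$", "", tex_source, flags=re.MULTILINE)

def remove_trailing_whitespace(tex_source):
    # Strip spaces and tabs sitting directly before a newline or the end.
    return re.sub(r"[ \t]+$", "", tex_source, flags=re.MULTILINE)

assert remove_comments("This is content. % a comment") == "This is content. "
assert remove_comments(r"The uncertainty is 5\%. % a comment") == r"The uncertainty is 5\%. "
assert remove_trailing_whitespace("First line.  \nSecond line.  ") == "First line.\nSecond line."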
import foauth.providers
from foauth import OAuthDenied
class Elance(foauth.providers.OAuth2):
# General info about the provider
provider_url = 'https://www.elance.com/'
docs_url = 'https://www.elance.com/q/api2'
category = 'Career'
# URLs to interact with the API
authorize_url = 'https://api.elance.com/api2/oauth/authorize'
access_token_url = 'https://api.elance.com/api2/oauth/token'
api_domain = 'api.elance.com'
available_permissions = [
(None, 'access and manage your Elance account'),
]
bearer_type = foauth.providers.BEARER_URI
def parse_token(self, content):
return super(Elance, self).parse_token(content)[u'data']
def callback(self, data, *args, **kwargs):
if data.get('error') == 'access_denied':
raise OAuthDenied('Denied access to Elance')
return super(Elance, self).callback(data, *args, **kwargs)
def get_user_id(self, key):
r = self.api(key, self.api_domain, u'/api2/profiles/my')
return unicode(r.json()[u'data'][u'providerProfile'][u'userId'])
| [
"import foauth.providers\n",
"from foauth import OAuthDenied\n",
"\n",
"\n",
"class Elance(foauth.providers.OAuth2):\n",
" # General info about the provider\n",
" provider_url = 'https://www.elance.com/'\n",
" docs_url = 'https://www.elance.com/q/api2'\n",
" category = 'Career'\n",
"\n",
" # URLs to interact with the API\n",
" authorize_url = 'https://api.elance.com/api2/oauth/authorize'\n",
" access_token_url = 'https://api.elance.com/api2/oauth/token'\n",
" api_domain = 'api.elance.com'\n",
"\n",
" available_permissions = [\n",
" (None, 'access and manage your Elance account'),\n",
" ]\n",
"\n",
" bearer_type = foauth.providers.BEARER_URI\n",
"\n",
" def parse_token(self, content):\n",
" return super(Elance, self).parse_token(content)[u'data']\n",
"\n",
" def callback(self, data, *args, **kwargs):\n",
" if data.get('error') == 'access_denied':\n",
" raise OAuthDenied('Denied access to Elance')\n",
"\n",
" return super(Elance, self).callback(data, *args, **kwargs)\n",
"\n",
" def get_user_id(self, key):\n",
" r = self.api(key, self.api_domain, u'/api2/profiles/my')\n",
" return unicode(r.json()[u'data'][u'providerProfile'][u'userId'])\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 33 | 0 |
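One detail worth noting in the provider above is the parse_token override: it implies Elance wraps the usual OAuth2 token payload inside a top-level "data" key. The snippet below only illustrates that unwrapping with an invented response body and a JSON-decoding stand-in for the base class; foauth's real parse_token is not shown here and may behave differently.

# Hedged illustration of the "data" envelope unwrapping described above.
import json

def parse_token(content):
    # Stand-in: assume the base class would JSON-decode the body,
    # and Elance then unwraps the "data" envelope.
    return json.loads(content)["data"]

fake_body = json.dumps({"data": {"access_token": "abc123", "token_type": "bearer"}})
print(parse_token(fake_body))  # {'access_token': 'abc123', 'token_type': 'bearer'}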
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
"""
from time import time
from threading import Lock
# 10kb minimum rate
MIN_RATE = 10240
class Bucket:
def __init__(self):
self.rate = 0 # bytes per second, maximum targeted throughput
self.tokens = 0
self.timestamp = time()
self.lock = Lock()
def __nonzero__(self):
return False if self.rate < MIN_RATE else True
def setRate(self, rate):
self.lock.acquire()
self.rate = int(rate)
self.lock.release()
def consumed(self, amount):
""" return the time the process has to sleep, after it consumed a specified amount """
if self.rate < MIN_RATE: return 0 #May become unresponsive otherwise
self.lock.acquire()
self.calc_tokens()
self.tokens -= amount
if self.tokens < 0:
time = -self.tokens/float(self.rate)
else:
time = 0
self.lock.release()
return time
def calc_tokens(self):
if self.tokens < self.rate:
now = time()
delta = self.rate * (now - self.timestamp)
self.tokens = min(self.rate, self.tokens + delta)
self.timestamp = now
| [
"#!/usr/bin/env python\n",
"# -*- coding: utf-8 -*-\n",
"\"\"\"\n",
" This program is free software; you can redistribute it and/or modify\n",
" it under the terms of the GNU General Public License as published by\n",
" the Free Software Foundation; either version 3 of the License,\n",
" or (at your option) any later version.\n",
"\n",
" This program is distributed in the hope that it will be useful,\n",
" but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n",
" See the GNU General Public License for more details.\n",
"\n",
" You should have received a copy of the GNU General Public License\n",
" along with this program; if not, see <http://www.gnu.org/licenses/>.\n",
" \n",
" @author: RaNaN\n",
"\"\"\"\n",
"\n",
"from time import time\n",
"from threading import Lock\n",
"\n",
"# 10kb minimum rate\n",
"MIN_RATE = 10240\n",
"\n",
"class Bucket:\n",
" def __init__(self):\n",
" self.rate = 0 # bytes per second, maximum targeted throughput\n",
" self.tokens = 0\n",
" self.timestamp = time()\n",
" self.lock = Lock()\n",
"\n",
" def __nonzero__(self):\n",
" return False if self.rate < MIN_RATE else True\n",
"\n",
" def setRate(self, rate):\n",
" self.lock.acquire()\n",
" self.rate = int(rate)\n",
" self.lock.release()\n",
"\n",
" def consumed(self, amount):\n",
" \"\"\" return the time the process has to sleep, after it consumed a specified amount \"\"\"\n",
" if self.rate < MIN_RATE: return 0 #May become unresponsive otherwise\n",
" self.lock.acquire()\n",
"\n",
" self.calc_tokens()\n",
" self.tokens -= amount\n",
"\n",
" if self.tokens < 0:\n",
" time = -self.tokens/float(self.rate)\n",
" else:\n",
" time = 0\n",
"\n",
" self.lock.release()\n",
" return time\n",
"\n",
" def calc_tokens(self):\n",
" if self.tokens < self.rate:\n",
" now = time()\n",
" delta = self.rate * (now - self.timestamp)\n",
" self.tokens = min(self.rate, self.tokens + delta)\n",
" self.timestamp = now\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142,
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0.03896103896103896,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1
] | 63 | 0.021194 |
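The Bucket above is a small token-bucket rate limiter: setRate() sets the target throughput in bytes per second, and consumed(n) returns how long the caller should sleep after handling n bytes (always 0 when the rate is under MIN_RATE, i.e. throttling disabled). A minimal usage sketch, assuming the Bucket class from the entry above is in scope; the chunk generator and BytesIO sink are invented for illustration.

# Usage sketch only; relies on the Bucket class shown above being defined.
import io
from time import sleep

def throttled_write(chunks, bucket, out):
    # After each chunk, sleep for however long the bucket asks.
    for chunk in chunks:
        out.write(chunk)
        sleep(bucket.consumed(len(chunk)))

bucket = Bucket()
bucket.setRate(50 * 1024)  # ~50 KiB/s, above MIN_RATE, so throttling is active
throttled_write((b"x" * 16384 for _ in range(8)), bucket, io.BytesIO())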
from PyQt5.QtWidgets import QTreeWidgetItem
from TriblerGUI.utilities import prec_div
class TickWidgetItem(QTreeWidgetItem):
"""
This class represents a widget that displays a tick (either an ask or a bid).
"""
def __init__(self, parent, tick, asset1_prec, asset2_prec):
QTreeWidgetItem.__init__(self, parent)
self.tick = tick
self.total_volume = prec_div(tick["assets"]["first"]["amount"], asset1_prec)
self.cur_volume = prec_div(tick["assets"]["first"]["amount"] - tick["traded"], asset1_prec)
self.price = float(self.total_volume) / float(prec_div(tick["assets"]["second"]["amount"], asset2_prec))
if self.tick["type"] == "ask":
self.setText(0, "%g" % self.price)
self.setText(1, "%g" % self.cur_volume)
self.setText(2, "%g" % self.total_volume)
else:
self.setText(0, "%g" % self.total_volume)
self.setText(1, "%g" % self.cur_volume)
self.setText(2, "%g" % self.price)
@property
def is_ask(self):
return self.tick["type"] == "ask"
def __lt__(self, other):
column = self.treeWidget().sortColumn()
if self.is_ask and column == 0 or not self.is_ask and column == 2:
return self.price > other.price
if column == 1:
return self.cur_volume > other.cur_volume
if self.is_ask and column == 2 or not self.is_ask and column == 0:
return self.total_volume > other.total_volume
return self.text(column) > other.text(column)
| [
"from PyQt5.QtWidgets import QTreeWidgetItem\n",
"from TriblerGUI.utilities import prec_div\n",
"\n",
"\n",
"class TickWidgetItem(QTreeWidgetItem):\n",
" \"\"\"\n",
" This class represents a widget that displays a tick (either an ask or a bid).\n",
" \"\"\"\n",
"\n",
" def __init__(self, parent, tick, asset1_prec, asset2_prec):\n",
" QTreeWidgetItem.__init__(self, parent)\n",
" self.tick = tick\n",
"\n",
" self.total_volume = prec_div(tick[\"assets\"][\"first\"][\"amount\"], asset1_prec)\n",
" self.cur_volume = prec_div(tick[\"assets\"][\"first\"][\"amount\"] - tick[\"traded\"], asset1_prec)\n",
"\n",
" self.price = float(self.total_volume) / float(prec_div(tick[\"assets\"][\"second\"][\"amount\"], asset2_prec))\n",
"\n",
" if self.tick[\"type\"] == \"ask\":\n",
" self.setText(0, \"%g\" % self.price)\n",
" self.setText(1, \"%g\" % self.cur_volume)\n",
" self.setText(2, \"%g\" % self.total_volume)\n",
" else:\n",
" self.setText(0, \"%g\" % self.total_volume)\n",
" self.setText(1, \"%g\" % self.cur_volume)\n",
" self.setText(2, \"%g\" % self.price)\n",
"\n",
" @property\n",
" def is_ask(self):\n",
" return self.tick[\"type\"] == \"ask\"\n",
"\n",
" def __lt__(self, other):\n",
" column = self.treeWidget().sortColumn()\n",
" if self.is_ask and column == 0 or not self.is_ask and column == 2:\n",
" return self.price > other.price\n",
" if column == 1:\n",
" return self.cur_volume > other.cur_volume\n",
" if self.is_ask and column == 2 or not self.is_ask and column == 0:\n",
" return self.total_volume > other.total_volume\n",
"\n",
" return self.text(column) > other.text(column)\n"
] | [
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0.01,
0,
0.008849557522123894,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 41 | 0.001044 |
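For orientation, the widget above derives its display values from raw integer amounts: total and remaining volume come from the first asset, and the price is the first-asset volume divided by the second-asset volume. The sketch below reruns that arithmetic outside Qt; treating prec_div(amount, precision) as division by 10 ** precision is an assumption (TriblerGUI.utilities is not shown here), and the tick dict is invented.

# Stand-alone sketch of the price/volume arithmetic; prec_div is assumed behaviour.
def prec_div(amount, precision):
    return amount / (10 ** precision)

tick = {
    "type": "ask",
    "traded": 50_000_000,
    "assets": {"first": {"amount": 150_000_000},   # e.g. 1.5 units at precision 8
               "second": {"amount": 30_000}},      # e.g. 30.0 units at precision 3
}
total_volume = prec_div(tick["assets"]["first"]["amount"], 8)                 # 1.5
cur_volume = prec_div(tick["assets"]["first"]["amount"] - tick["traded"], 8)  # 1.0
price = float(total_volume) / float(prec_div(tick["assets"]["second"]["amount"], 3))  # 0.05
print(total_volume, cur_volume, price)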
#YATZY version 1.6
#This is a yatzy game please enjoy
#2 player mode
def reroll(plyr, dice):
for i in range (0,5):
print("%s) %s\n" % (i+1,dice[plyr][i]))
ans=[]
while True:
answ=input("enter 1-5 ")
try:
answ=int(answ)
break
except ValueError:
if answ=="":
break
continue
print("")
answ=str(answ)
for i in range(0, len(answ)):
ans.append("")
ans[i]=int(answ[i])-1
for j in range(0, len(ans)):
for i in range(0,5):
i = str(i)
if i in str(ans[j]):
i=int(i)
dice[plyr][i]=random.randint(1,6)
i=int(i)
def prints(dices, plyr, dice):
if dices:
for i in range (0,5):
print("%s) %s\n" % (i+1,dice[plyr][i]))
for i in range(1,7):
print("%s) %s %s %s"%(i,stuff[i],points[0][i], points[1][i]))
print("Sum %s %s"%(sum[0], sum[1]))
print("Bonus %s %s"%(bonus[0], bonus[1])) #you need 63 sum on upper for you to get bonus
for i in range(7,16):
print("%s) %s %s %s"%(i,stuff[i],points[0][i], points[1][i]))
print("Total %s %s"%(tot[0], tot[1]))
print("\n")
def check(plyr, dice):
while True:
ans = input("enter 1-15 ")
try:
ans=int(ans)
break
except ValueError:
continue
print("\n\n")
if ans == 1 and points[plyr][ans]==0:
for i in range (0,5):
if ans == dice[plyr][i]:
points[plyr][ans]+=ans
if points[plyr][ans]==0:
points[plyr][ans]="STRIKE"
elif ans == 2 and points[plyr][ans]==0:
for i in range (0,5):
if ans == dice[plyr][i]:
points[plyr][ans]+=ans
if points[plyr][ans]==0:
points[plyr][ans]="STRIKE"
elif ans == 3 and points[plyr][ans]==0:
for i in range (0,5):
if ans == dice[plyr][i]:
points[plyr][ans]+=ans
if points[plyr][ans]==0:
points[plyr][ans]="STRIKE"
elif ans == 4 and points[plyr][ans]==0:
for i in range (0,5):
if ans == dice[plyr][i]:
points[plyr][ans]+=ans
if points[plyr][ans]==0:
points[plyr][ans]="STRIKE"
elif ans == 5 and points[plyr][ans]==0:
for i in range (0,5):
if ans == dice[plyr][i]:
points[plyr][ans]+=ans
if points[plyr][ans]==0:
points[plyr][ans]="STRIKE"
elif ans == 6 and points[plyr][ans]==0:
for i in range (0,5):
if ans == dice[plyr][i]:
points[plyr][ans]+=ans
if points[plyr][ans]==0:
points[plyr][ans]="STRIKE"
elif ans == 7 and points[plyr][ans]==0:
for i in range(4,0,-1):
if dice[plyr][i]==dice[plyr][i-1]:
points[plyr][ans]=dice[plyr][i]*2
break
else:
points[plyr][ans]="STRIKE"
elif ans == 8 and points[plyr][ans]==0:
if dice[plyr][0]==dice[plyr][1] and dice[plyr][2]==dice[plyr][3]:
points[plyr][ans]=dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]
elif dice[plyr][1]==dice[plyr][2] and dice[plyr][3]==dice[plyr][4]:
points[plyr][ans]=dice[plyr][4]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]
elif dice[plyr][0]==dice[plyr][1] and dice[plyr][3]==dice[plyr][4]:
points[plyr][ans]=dice[plyr][0]+dice[plyr][1]+dice[plyr][3]+dice[plyr][4]
else:
points[plyr][ans]="STRIKE"
elif ans == 9 and points[plyr][ans]==0:
for i in range(4,1,-1):
if dice[plyr][i]==dice[plyr][i-1]==dice[plyr][i-2]:
points[plyr][ans]=dice[plyr][i]*3
break
else:
points[plyr][ans]="STRIKE"
elif ans == 10 and points[plyr][ans]==0:
for i in range(4,2,-1):
if dice[plyr][i]==dice[plyr][i-1]==dice[plyr][i-2]==dice[plyr][i-3]:
points[plyr][ans]=dice[plyr][i]*4
break
else:
points[plyr][ans]="STRIKE"
elif ans == 11 and points[plyr][ans]==0:
if dice[plyr][0]==1 and dice[plyr][1]==2 and dice[plyr][2]==3 and dice[plyr][3]==4 and dice[plyr][4]==5:
points[plyr][ans] = dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]
else:
points[plyr][ans]="STRIKE"
elif ans == 12 and points[plyr][ans]==0:
if dice[plyr][0]==2 and dice[plyr][1]==3 and dice[plyr][2]==4 and dice[plyr][3]==5 and dice[plyr][4]==6:
points[plyr][ans] = dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]
else:
points[plyr][ans]="STRIKE"
elif ans == 13 and points[plyr][ans]==0:
if dice[plyr][0]==dice[plyr][1] and dice[plyr][2]==dice[plyr][3]==dice[plyr][4]:
points[plyr][ans] = dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]
elif dice[plyr][0]==dice[plyr][1]==dice[plyr][2] and dice[plyr][3]==dice[plyr][4]:
points[plyr][ans] = dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]
else:
points[plyr][ans]="STRIKE"
elif ans == 14 and points[plyr][ans]==0:
points[plyr][ans]=dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]
elif ans == 15 and points[plyr][ans]==0:
if dice[plyr][0]==dice[plyr][1]==dice[plyr][2]==dice[plyr][3]==dice[plyr][4]:
points[plyr][ans]=50
else:
points[plyr][ans]="STRIKE"
else:
while True:
randomNum=random.randint(1,15)
if points[plyr][randomNum]==0:
points[plyr][randomNum]="STRIKE"
break
else:
continue
def game(plyr):
dice=[[random.randint(1,6),random.randint(1,6),random.randint(1,6),random.randint(1,6),random.randint(1,6)], [random.randint(1,6),random.randint(1,6),random.randint(1,6),random.randint(1,6),random.randint(1,6)]]
reroll(plyr, dice)
reroll(plyr, dice)
dice[plyr].sort()
print("chose from the list which one you want\"if you want to strike something you pick something you cannot do\"")
prints(True, plyr, dice)
check(plyr, dice)
sum[plyr]=0
for i in range(1, 7):
if str(points[plyr][i])=="STRIKE":
continue
else:
sum[plyr]+=points[plyr][i]
if points[plyr][1]!=0 and points[plyr][2]!=0 and points[plyr][3]!=0 and points[plyr][4]!=0 and points[plyr][5]!=0 and points[plyr][6]!=0 and sumt[plyr]==0:
sum[plyr]=0
for i in range(1, 7):
if "STRIKE" in str(points[plyr][i]):
continue
else:
sum[plyr]+=points[plyr][i]
if sum[plyr]>=63:
bonus[plyr]=50
sumt[plyr]=1
prints(False, plyr, dice)
import random
sum=[0, 0]
bonus=[0, 0]
tot=[0, 0]
sumt=[0, 0]
stuff={1:"Aces",2:"Twos",3:"Threes",4:"Fours",5:"Fives",6:"Sixes",7:"Pair",8:"Two pairs",9:"Three Of A Kind",10:"Four Of A Kind",11:"Small Straight",12:"Large Straight",13:"Full House",14:"Chance",15:"YATZY"}
points=[{1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0,13:0,14:0,15:0}, {1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0,13:0,14:0,15:0}]
while True:
game(0)
if (points[0][1]!=0 and points[0][2]!=0 and points[0][3]!=0 and points[0][4]!=0 and points[0][5]!=0 and points[0][6]!=0
and points[0][7]!=0 and points[0][8]!=0 and points[0][9]!=0 and points[0][10]!=0 and points[0][11]!=0 and points[0][12]!=0
and points[0][13]!=0 and points[0][14]!=0 and points[0][15]!=0 and points[1][1]!=0 and points[1][2]!=0 and points[1][3]!=0
and points[1][4]!=0 and points[1][5]!=0 and points[1][6]!=0 and points[1][7]!=0 and points[1][8]!=0 and points[1][9]!=0
and points[1][10]!=0 and points[1][11]!=0 and points[1][12]!=0 and points[1][13]!=0 and points[1][14]!=0 and points[1][15]!=0):
break
input("Press enter player 2")
game(1)
if (points[0][1]!=0 and points[0][2]!=0 and points[0][3]!=0 and points[0][4]!=0 and points[0][5]!=0 and points[0][6]!=0
and points[0][7]!=0 and points[0][8]!=0 and points[0][9]!=0 and points[0][10]!=0 and points[0][11]!=0 and points[0][12]!=0
and points[0][13]!=0 and points[0][14]!=0 and points[0][15]!=0 and points[1][1]!=0 and points[1][2]!=0 and points[1][3]!=0
and points[1][4]!=0 and points[1][5]!=0 and points[1][6]!=0 and points[1][7]!=0 and points[1][8]!=0 and points[1][9]!=0
and points[1][10]!=0 and points[1][11]!=0 and points[1][12]!=0 and points[1][13]!=0 and points[1][14]!=0 and points[1][15]!=0):
break
input("Press enter player 1")
for i in range(1, 16):
if "STRIKE" in str(points[0][i]):
continue
else:
tot[0]+=points[0][i]
tot[0]+=bonus[0]
for i in range(1, 16):
if "STRIKE" in str(points[1][i]):
continue
else:
tot[1]+=points[1][i]
tot[1]+=bonus[1]
prints(False, 0, 0)
#TODO ADD CHOICE TO PICK 1 or 2 player
| [
"#YATZY version 1.6\n",
"#This is a yatzy game please enjoy\n",
"#2 player mode\n",
"def reroll(plyr, dice):\n",
" for i in range (0,5):\n",
" print(\"%s) %s\\n\" % (i+1,dice[plyr][i]))\n",
" ans=[]\n",
" while True:\n",
" answ=input(\"enter 1-5 \")\n",
" try:\n",
" answ=int(answ)\n",
" break\n",
" except ValueError:\n",
" if answ==\"\":\n",
" break\n",
" continue\n",
" print(\"\")\n",
" answ=str(answ)\n",
" for i in range(0, len(answ)):\n",
" ans.append(\"\")\n",
" ans[i]=int(answ[i])-1\n",
" for j in range(0, len(ans)):\n",
" for i in range(0,5):\n",
" i = str(i)\n",
" if i in str(ans[j]):\n",
" i=int(i)\n",
" dice[plyr][i]=random.randint(1,6)\n",
" i=int(i)\n",
"def prints(dices, plyr, dice):\n",
" if dices:\n",
" for i in range (0,5):\n",
" print(\"%s) %s\\n\" % (i+1,dice[plyr][i])) \n",
" for i in range(1,7):\n",
" print(\"%s) %s %s %s\"%(i,stuff[i],points[0][i], points[1][i]))\n",
" print(\"Sum %s %s\"%(sum[0], sum[1]))\n",
" print(\"Bonus %s %s\"%(bonus[0], bonus[1])) #you need 63 sum on upper for you to get bonus\n",
" for i in range(7,16):\n",
" print(\"%s) %s %s %s\"%(i,stuff[i],points[0][i], points[1][i]))\n",
" print(\"Total %s %s\"%(tot[0], tot[1]))\n",
" print(\"\\n\")\n",
"def check(plyr, dice):\n",
" while True:\n",
" ans = input(\"enter 1-15 \")\n",
" try:\n",
" ans=int(ans)\n",
" break\n",
" except ValueError:\n",
" continue \n",
" print(\"\\n\\n\")\n",
" if ans == 1 and points[plyr][ans]==0:\n",
" for i in range (0,5):\n",
" if ans == dice[plyr][i]:\n",
" points[plyr][ans]+=ans\n",
" if points[plyr][ans]==0:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 2 and points[plyr][ans]==0:\n",
" for i in range (0,5):\n",
" if ans == dice[plyr][i]:\n",
" points[plyr][ans]+=ans\n",
" if points[plyr][ans]==0:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 3 and points[plyr][ans]==0:\n",
" for i in range (0,5):\n",
" if ans == dice[plyr][i]:\n",
" points[plyr][ans]+=ans\n",
" if points[plyr][ans]==0:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 4 and points[plyr][ans]==0:\n",
" for i in range (0,5):\n",
" if ans == dice[plyr][i]:\n",
" points[plyr][ans]+=ans\n",
" if points[plyr][ans]==0:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 5 and points[plyr][ans]==0:\n",
" for i in range (0,5):\n",
" if ans == dice[plyr][i]:\n",
" points[plyr][ans]+=ans\n",
" if points[plyr][ans]==0:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 6 and points[plyr][ans]==0:\n",
" for i in range (0,5):\n",
" if ans == dice[plyr][i]:\n",
" points[plyr][ans]+=ans\n",
" if points[plyr][ans]==0:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 7 and points[plyr][ans]==0:\n",
" for i in range(4,0,-1):\n",
" if dice[plyr][i]==dice[plyr][i-1]:\n",
" points[plyr][ans]=dice[plyr][i]*2\n",
" break\n",
" else:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 8 and points[plyr][ans]==0:\n",
" if dice[plyr][0]==dice[plyr][1] and dice[plyr][2]==dice[plyr][3]:\n",
" points[plyr][ans]=dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]\n",
" elif dice[plyr][1]==dice[plyr][2] and dice[plyr][3]==dice[plyr][4]:\n",
" points[plyr][ans]=dice[plyr][4]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]\n",
" elif dice[plyr][0]==dice[plyr][1] and dice[plyr][3]==dice[plyr][4]:\n",
" points[plyr][ans]=dice[plyr][0]+dice[plyr][1]+dice[plyr][3]+dice[plyr][4]\n",
" else:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 9 and points[plyr][ans]==0:\n",
" for i in range(4,1,-1):\n",
" if dice[plyr][i]==dice[plyr][i-1]==dice[plyr][i-2]:\n",
" points[plyr][ans]=dice[plyr][i]*3\n",
" break\n",
" else:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 10 and points[plyr][ans]==0:\n",
" for i in range(4,2,-1):\n",
" if dice[plyr][i]==dice[plyr][i-1]==dice[plyr][i-2]==dice[plyr][i-3]:\n",
" points[plyr][ans]=dice[plyr][i]*4\n",
" break\n",
" else:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 11 and points[plyr][ans]==0:\n",
" if dice[plyr][0]==1 and dice[plyr][1]==2 and dice[plyr][2]==3 and dice[plyr][3]==4 and dice[plyr][4]==5:\n",
" points[plyr][ans] = dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]\n",
" else:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 12 and points[plyr][ans]==0:\n",
" if dice[plyr][0]==2 and dice[plyr][1]==3 and dice[plyr][2]==4 and dice[plyr][3]==5 and dice[plyr][4]==6:\n",
" points[plyr][ans] = dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]\n",
" else:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 13 and points[plyr][ans]==0:\n",
" if dice[plyr][0]==dice[plyr][1] and dice[plyr][2]==dice[plyr][3]==dice[plyr][4]:\n",
" points[plyr][ans] = dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]\n",
" elif dice[plyr][0]==dice[plyr][1]==dice[plyr][2] and dice[plyr][3]==dice[plyr][4]:\n",
" points[plyr][ans] = dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]\n",
" else:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" elif ans == 14 and points[plyr][ans]==0:\n",
" points[plyr][ans]=dice[plyr][0]+dice[plyr][1]+dice[plyr][2]+dice[plyr][3]+dice[plyr][4]\n",
" elif ans == 15 and points[plyr][ans]==0:\n",
" if dice[plyr][0]==dice[plyr][1]==dice[plyr][2]==dice[plyr][3]==dice[plyr][4]:\n",
" points[plyr][ans]=50\n",
" else:\n",
" points[plyr][ans]=\"STRIKE\"\n",
" else:\n",
" while True:\n",
" randomNum=random.randint(1,15)\n",
" if points[plyr][randomNum]==0:\n",
" points[plyr][randomNum]=\"STRIKE\"\n",
" break\n",
" else:\n",
" continue\n",
"def game(plyr):\n",
" dice=[[random.randint(1,6),random.randint(1,6),random.randint(1,6),random.randint(1,6),random.randint(1,6)], [random.randint(1,6),random.randint(1,6),random.randint(1,6),random.randint(1,6),random.randint(1,6)]]\n",
" reroll(plyr, dice)\n",
" reroll(plyr, dice)\n",
" dice[plyr].sort()\n",
" print(\"chose from the list which one you want\\\"if you want to strike something you pick something you cannot do\\\"\")\n",
" prints(True, plyr, dice)\n",
" check(plyr, dice)\n",
" sum[plyr]=0\n",
" for i in range(1, 7):\n",
" if str(points[plyr][i])==\"STRIKE\":\n",
" continue\n",
" else:\n",
" sum[plyr]+=points[plyr][i]\n",
" if points[plyr][1]!=0 and points[plyr][2]!=0 and points[plyr][3]!=0 and points[plyr][4]!=0 and points[plyr][5]!=0 and points[plyr][6]!=0 and sumt[plyr]==0:\n",
" sum[plyr]=0\n",
" for i in range(1, 7):\n",
" if \"STRIKE\" in str(points[plyr][i]):\n",
" continue\n",
" else:\n",
" sum[plyr]+=points[plyr][i]\n",
" if sum[plyr]>=63:\n",
" bonus[plyr]=50\n",
" sumt[plyr]=1\n",
" prints(False, plyr, dice)\n",
"import random\n",
"sum=[0, 0]\n",
"bonus=[0, 0]\n",
"tot=[0, 0]\n",
"sumt=[0, 0]\n",
"stuff={1:\"Aces\",2:\"Twos\",3:\"Threes\",4:\"Fours\",5:\"Fives\",6:\"Sixes\",7:\"Pair\",8:\"Two pairs\",9:\"Three Of A Kind\",10:\"Four Of A Kind\",11:\"Small Straight\",12:\"Large Straight\",13:\"Full House\",14:\"Chance\",15:\"YATZY\"}\n",
"points=[{1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0,13:0,14:0,15:0}, {1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0,13:0,14:0,15:0}]\n",
"while True:\n",
" game(0)\n",
" if (points[0][1]!=0 and points[0][2]!=0 and points[0][3]!=0 and points[0][4]!=0 and points[0][5]!=0 and points[0][6]!=0\n",
" and points[0][7]!=0 and points[0][8]!=0 and points[0][9]!=0 and points[0][10]!=0 and points[0][11]!=0 and points[0][12]!=0 \n",
" and points[0][13]!=0 and points[0][14]!=0 and points[0][15]!=0 and points[1][1]!=0 and points[1][2]!=0 and points[1][3]!=0\n",
" and points[1][4]!=0 and points[1][5]!=0 and points[1][6]!=0 and points[1][7]!=0 and points[1][8]!=0 and points[1][9]!=0\n",
" and points[1][10]!=0 and points[1][11]!=0 and points[1][12]!=0 and points[1][13]!=0 and points[1][14]!=0 and points[1][15]!=0):\n",
" break\n",
" input(\"Press enter player 2\")\n",
" game(1)\n",
" if (points[0][1]!=0 and points[0][2]!=0 and points[0][3]!=0 and points[0][4]!=0 and points[0][5]!=0 and points[0][6]!=0\n",
" and points[0][7]!=0 and points[0][8]!=0 and points[0][9]!=0 and points[0][10]!=0 and points[0][11]!=0 and points[0][12]!=0 \n",
" and points[0][13]!=0 and points[0][14]!=0 and points[0][15]!=0 and points[1][1]!=0 and points[1][2]!=0 and points[1][3]!=0\n",
" and points[1][4]!=0 and points[1][5]!=0 and points[1][6]!=0 and points[1][7]!=0 and points[1][8]!=0 and points[1][9]!=0\n",
" and points[1][10]!=0 and points[1][11]!=0 and points[1][12]!=0 and points[1][13]!=0 and points[1][14]!=0 and points[1][15]!=0):\n",
" break\n",
" input(\"Press enter player 1\")\n",
" \n",
"for i in range(1, 16):\n",
" if \"STRIKE\" in str(points[0][i]):\n",
" continue\n",
" else:\n",
" tot[0]+=points[0][i]\n",
"tot[0]+=bonus[0]\n",
"for i in range(1, 16):\n",
" if \"STRIKE\" in str(points[1][i]):\n",
" continue\n",
" else:\n",
" tot[1]+=points[1][i]\n",
"tot[1]+=bonus[1]\n",
"prints(False, 0, 0)\n",
"#TODO ADD CHOICE TO PICK 1 or 2 player\n"
] | [
0.05263157894736842,
0.02857142857142857,
0.06666666666666667,
0,
0.07692307692307693,
0.04081632653061224,
0.09090909090909091,
0,
0.030303030303030304,
0,
0.037037037037037035,
0,
0,
0.04,
0,
0,
0,
0.05263157894736842,
0,
0,
0.03333333333333333,
0,
0.034482758620689655,
0,
0,
0.04,
0.04,
0.047619047619047616,
0.03225806451612903,
0,
0.06666666666666667,
0.05263157894736842,
0.04,
0.04054054054054054,
0.022222222222222223,
0.04081632653061224,
0.038461538461538464,
0.04054054054054054,
0.022727272727272728,
0,
0.043478260869565216,
0,
0,
0,
0.04,
0,
0,
0.04,
0,
0.023809523809523808,
0.06666666666666667,
0,
0.02564102564102564,
0.030303030303030304,
0.02564102564102564,
0.022727272727272728,
0.06666666666666667,
0,
0.02564102564102564,
0.030303030303030304,
0.02564102564102564,
0.022727272727272728,
0.06666666666666667,
0,
0.02564102564102564,
0.030303030303030304,
0.02564102564102564,
0.022727272727272728,
0.06666666666666667,
0,
0.02564102564102564,
0.030303030303030304,
0.02564102564102564,
0.022727272727272728,
0.06666666666666667,
0,
0.02564102564102564,
0.030303030303030304,
0.02564102564102564,
0.022727272727272728,
0.06666666666666667,
0,
0.02564102564102564,
0.030303030303030304,
0.02564102564102564,
0.022727272727272728,
0.0625,
0.02127659574468085,
0.02,
0,
0,
0.023255813953488372,
0.022727272727272728,
0.02702702702702703,
0.023255813953488372,
0.02631578947368421,
0.023255813953488372,
0.02631578947368421,
0.023255813953488372,
0,
0.02564102564102564,
0.022727272727272728,
0.0625,
0.03125,
0.02,
0,
0,
0.023255813953488372,
0.022222222222222223,
0.0625,
0.04938271604938271,
0.02,
0,
0,
0.023255813953488372,
0.022222222222222223,
0.05309734513274336,
0.00980392156862745,
0,
0.02564102564102564,
0.022222222222222223,
0.05309734513274336,
0.00980392156862745,
0,
0.02564102564102564,
0.022222222222222223,
0.0449438202247191,
0.00980392156862745,
0.04395604395604396,
0.00980392156862745,
0,
0.02564102564102564,
0.022222222222222223,
0.020833333333333332,
0.022222222222222223,
0.05813953488372093,
0.030303030303030304,
0,
0.02564102564102564,
0,
0,
0.046511627906976744,
0.023255813953488372,
0.02040816326530612,
0,
0,
0,
0.0625,
0.09259259259259259,
0,
0,
0,
0.008333333333333333,
0,
0,
0.0625,
0,
0.023255813953488372,
0,
0,
0.02564102564102564,
0.05,
0.05,
0,
0,
0,
0,
0.023255813953488372,
0.038461538461538464,
0.037037037037037035,
0.047619047619047616,
0,
0.14285714285714285,
0.09090909090909091,
0.07692307692307693,
0.09090909090909091,
0.08333333333333333,
0.14832535885167464,
0.410958904109589,
0,
0,
0.056451612903225805,
0.0703125,
0.07751937984496124,
0.072,
0.06060606060606061,
0,
0,
0,
0.056451612903225805,
0.0703125,
0.07751937984496124,
0.072,
0.06060606060606061,
0,
0,
0.2,
0,
0,
0,
0,
0.034482758620689655,
0.058823529411764705,
0,
0,
0,
0,
0.034482758620689655,
0.058823529411764705,
0,
0.02564102564102564
] | 211 | 0.029587 |
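The check() function in the entry above repeats the same logic six times for the upper-section categories (1 through 6): add up every die showing the chosen face, or mark a strike when none do. A compact equivalent of just those branches, written here as a sketch and not part of the original game:

# Sketch only: condenses the six identical upper-section branches of check().
def upper_section_score(category, dice):
    # Sum the dice showing the chosen face; an empty sum means a strike.
    score = sum(d for d in dice if d == category)
    return score if score else "STRIKE"

assert upper_section_score(3, [1, 3, 3, 5, 6]) == 6
assert upper_section_score(2, [1, 3, 3, 5, 6]) == "STRIKE"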
"""Manage host learning on VLANs."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from faucet import valve_of
class ValveHostManager(object):
"""Manage host learning on VLANs."""
# don't update host cache more often than this many seconds
CACHE_UPDATE_GUARD_TIME = 2
def __init__(self, logger, ports, vlans, eth_src_table, eth_dst_table,
learn_timeout, learn_jitter, learn_ban_timeout, low_priority, host_priority):
self.logger = logger
self.ports = ports
self.vlans = vlans
self.eth_src_table = eth_src_table
self.eth_dst_table = eth_dst_table
self.learn_timeout = learn_timeout
self.learn_jitter = learn_jitter
self.learn_ban_timeout = learn_ban_timeout
self.low_priority = low_priority
self.host_priority = host_priority
def ban_rules(self, pkt_meta):
"""Limit learning to a maximum configured on this port/VLAN.
Args:
pkt_meta: PacketMeta instance.
Returns:
list: OpenFlow messages, if any.
"""
ofmsgs = []
port = pkt_meta.port
eth_src = pkt_meta.eth_src
vlan = pkt_meta.vlan
entry = vlan.cached_host(eth_src)
if entry is None:
if port.max_hosts:
if port.hosts_count() == port.max_hosts:
ofmsgs.append(self._temp_ban_host_learning(self.eth_src_table.match(in_port=port.number)))
port.dyn_learn_ban_count += 1
self.logger.info(
'max hosts %u reached on %s, '
'temporarily banning learning on this port, '
'and not learning %s' % (
port.max_hosts, port, eth_src))
if vlan is not None and vlan.max_hosts:
hosts_count = vlan.hosts_count()
if hosts_count == vlan.max_hosts:
ofmsgs.append(self._temp_ban_host_learning(self.eth_src_table.match(vlan=vlan)))
vlan.dyn_learn_ban_count += 1
self.logger.info(
'max hosts %u reached on VLAN %u, '
'temporarily banning learning on this vlan, '
'and not learning %s on %s' % (
vlan.max_hosts, vlan.vid, eth_src, port))
return ofmsgs
def _temp_ban_host_learning(self, match):
return self.eth_src_table.flowdrop(
match,
priority=(self.low_priority + 1),
hard_timeout=self.learn_ban_timeout)
def delete_host_from_vlan(self, eth_src, vlan):
"""Delete a host from a VLAN."""
ofmsgs = []
ofmsgs.extend(self.eth_src_table.flowdel(
self.eth_src_table.match(vlan=vlan, eth_src=eth_src)))
ofmsgs.extend(self.eth_dst_table.flowdel(
self.eth_dst_table.match(vlan=vlan, eth_dst=eth_src)))
return ofmsgs
def expire_hosts_from_vlan(self, vlan, now):
"""Expire hosts from VLAN cache."""
expired_hosts = vlan.expire_cache_hosts(now, self.learn_timeout)
if expired_hosts:
vlan.dyn_last_time_hosts_expired = now
self.logger.info(
'%u recently active hosts on VLAN %u, expired %s' % (
vlan.hosts_count(), vlan.vid, expired_hosts))
return expired_hosts
def _jitter_learn_timeout(self):
"""Calculate jittered learning timeout to avoid synchronized host timeouts."""
return int(max(abs(
self.learn_timeout -
(self.learn_jitter / 2) + random.randint(0, self.learn_jitter)),
self.CACHE_UPDATE_GUARD_TIME))
def learn_host_timeouts(self, port):
"""Calculate flow timeouts for learning on a port."""
# hosts learned on this port never relearned
if port.permanent_learn:
learn_timeout = 0
else:
learn_timeout = self.learn_timeout
if self.learn_timeout:
learn_timeout = self._jitter_learn_timeout()
# Update datapath to no longer send packets from this mac to controller
# note the use of hard_timeout here and idle_timeout for the dst table
# this is to ensure that the source rules will always be deleted before
# any rules on the dst table. Otherwise if the dst table rule expires
# but the src table rule is still being hit intermittently the switch
# will flood packets to that dst and not realise it needs to relearn
# the rule
# NB: Must be lower than highest priority otherwise it can match
# flows destined to controller
src_rule_idle_timeout = 0
src_rule_hard_timeout = learn_timeout
dst_rule_idle_timeout = learn_timeout
return (src_rule_idle_timeout, src_rule_hard_timeout, dst_rule_idle_timeout)
def learn_host_on_vlan_port_flows(self, port, vlan, eth_src, delete_existing,
src_rule_idle_timeout, src_rule_hard_timeout,
dst_rule_idle_timeout):
"""Return flows that implement learning a host on a port."""
ofmsgs = []
if port.permanent_learn:
# Antispoofing rule for this MAC.
ofmsgs.append(self.eth_src_table.flowdrop(
self.eth_src_table.match(vlan=vlan, eth_src=eth_src),
priority=(self.host_priority - 2)))
else:
# Delete any existing entries for MAC.
# TODO: for LAGs, don't delete entries in the same LAG.
if delete_existing:
ofmsgs.extend(self.delete_host_from_vlan(eth_src, vlan))
# Associate this MAC with source port.
src_match = self.eth_src_table.match(
in_port=port.number, vlan=vlan, eth_src=eth_src)
if port.override_output_port:
ofmsgs.append(self.eth_src_table.flowmod(
match=src_match,
priority=(self.host_priority - 1),
inst=[valve_of.apply_actions([
valve_of.output_port(port.override_output_port.number)])],
hard_timeout=src_rule_hard_timeout,
idle_timeout=src_rule_idle_timeout))
else:
ofmsgs.append(self.eth_src_table.flowmod(
match=src_match,
priority=(self.host_priority - 1),
inst=[valve_of.goto_table(self.eth_dst_table)],
hard_timeout=src_rule_hard_timeout,
idle_timeout=src_rule_idle_timeout))
# Output packets for this MAC to specified port.
ofmsgs.append(self.eth_dst_table.flowmod(
self.eth_dst_table.match(vlan=vlan, eth_dst=eth_src),
priority=self.host_priority,
inst=[valve_of.apply_actions(vlan.output_port(port))],
idle_timeout=dst_rule_idle_timeout))
# If port is in hairpin mode, install a special rule
# that outputs packets destined to this MAC back out the same
# port they came in (e.g. multiple hosts on same WiFi AP,
# and FAUCET is switching between them on the same port).
if port.hairpin:
ofmsgs.append(self.eth_dst_table.flowmod(
self.eth_dst_table.match(in_port=port.number, vlan=vlan, eth_dst=eth_src),
priority=(self.host_priority + 1),
inst=[valve_of.apply_actions(vlan.output_port(port, hairpin=True))],
idle_timeout=dst_rule_idle_timeout))
return ofmsgs
def learn_host_on_vlan_ports(self, now, port, vlan, eth_src,
delete_existing=True,
last_dp_coldstart_time=None):
"""Learn a host on a port."""
ofmsgs = []
cache_port = None
cache_age = None
entry = vlan.cached_host(eth_src)
# Host not cached, and no hosts expired since we cold started
# Enable faster learning by assuming there's no previous host to delete
if entry is None:
if (last_dp_coldstart_time and
(vlan.dyn_last_time_hosts_expired is None or
vlan.dyn_last_time_hosts_expired < last_dp_coldstart_time)):
delete_existing = False
else:
cache_age = now - entry.cache_time
cache_port = entry.port
if cache_port == port:
# skip delete if host didn't change ports.
delete_existing = False
# if we very very recently learned this host, don't do anything.
if cache_age < self.CACHE_UPDATE_GUARD_TIME:
return (ofmsgs, cache_port)
if port.loop_protect:
ban_age = None
learn_ban = False
# if recently in loop protect mode and still receiving packets,
# prolong the ban
if port.dyn_last_ban_time:
ban_age = now - port.dyn_last_ban_time
if ban_age < self.CACHE_UPDATE_GUARD_TIME:
learn_ban = True
# if not in protect mode and we get a rapid move, enact protect mode
if not learn_ban and entry is not None:
if port != cache_port and cache_age < self.CACHE_UPDATE_GUARD_TIME:
learn_ban = True
port.dyn_learn_ban_count += 1
self.logger.info('rapid move of %s from %s to %s, temp loop ban %s' % (
eth_src, cache_port, port, port))
# already, or newly in protect mode, apply the ban rules.
if learn_ban:
port.dyn_last_ban_time = now
ofmsgs.append(self._temp_ban_host_learning(self.eth_src_table.match(in_port=port.number)))
return (ofmsgs, cache_port)
(src_rule_idle_timeout,
src_rule_hard_timeout,
dst_rule_idle_timeout) = self.learn_host_timeouts(port)
ofmsgs.extend(self.learn_host_on_vlan_port_flows(
port, vlan, eth_src, delete_existing,
src_rule_idle_timeout, src_rule_hard_timeout,
dst_rule_idle_timeout))
vlan.add_cache_host(eth_src, port, now)
return (ofmsgs, cache_port)
def flow_timeout(self, _now, _table_id, _match):
"""Handle a flow timed out message from dataplane."""
return []
class ValveHostFlowRemovedManager(ValveHostManager):
"""Trigger relearning on flow removed notifications.
.. note::
not currently reliable.
"""
def flow_timeout(self, now, table_id, match):
ofmsgs = []
if table_id in (self.eth_src_table.table_id, self.eth_dst_table.table_id):
if 'vlan_vid' in match:
vlan = self.vlans[valve_of.devid_present(match['vlan_vid'])]
in_port = None
eth_src = None
eth_dst = None
for field, value in list(match.items()):
if field == 'in_port':
in_port = value
elif field == 'eth_src':
eth_src = value
elif field == 'eth_dst':
eth_dst = value
if eth_src and in_port:
port = self.ports[in_port]
ofmsgs.extend(self._src_rule_expire(vlan, port, eth_src))
elif eth_dst:
ofmsgs.extend(self._dst_rule_expire(now, vlan, eth_dst))
return ofmsgs
def expire_hosts_from_vlan(self, _vlan, _now):
return []
def learn_host_timeouts(self, port):
"""Calculate flow timeouts for learning on a port."""
# hosts learned on this port never relearned
if port.permanent_learn:
learn_timeout = 0
else:
learn_timeout = self._jitter_learn_timeout()
# Disable hard_time, dst rule expires after src rule.
src_rule_idle_timeout = learn_timeout
src_rule_hard_timeout = 0
dst_rule_idle_timeout = learn_timeout + self.CACHE_UPDATE_GUARD_TIME
return (src_rule_idle_timeout, src_rule_hard_timeout, dst_rule_idle_timeout)
def _src_rule_expire(self, vlan, port, eth_src):
"""When a src rule expires, the host is probably inactive or active in
receiving but not sending. We just mark the host as expired."""
ofmsgs = []
entry = vlan.cached_host_on_port(eth_src, port)
if entry is not None:
vlan.expire_cache_host(eth_src)
self.logger.info('expired src_rule for host %s' % eth_src)
return ofmsgs
def _dst_rule_expire(self, now, vlan, eth_dst):
"""Expiring a dst rule may indicate that the host is actively sending
traffic but not receiving. If the src rule has not yet expired, we reinstall
host rules."""
ofmsgs = []
entry = vlan.cached_host(eth_dst)
if entry is not None:
ofmsgs.extend(self.learn_host_on_vlan_ports(
now, entry.port, vlan, eth_dst, delete_existing=False))
self.logger.info(
'refreshing host %s from VLAN %u' % (eth_dst, vlan.vid))
return ofmsgs
| [
"\"\"\"Manage host learning on VLANs.\"\"\"\n",
"\n",
"# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.\n",
"# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.\n",
"# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.\n",
"# Copyright (C) 2015--2018 The Contributors\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"import random\n",
"\n",
"from faucet import valve_of\n",
"\n",
"\n",
"class ValveHostManager(object):\n",
" \"\"\"Manage host learning on VLANs.\"\"\"\n",
"\n",
" # don't update host cache more often than this many seconds\n",
" CACHE_UPDATE_GUARD_TIME = 2\n",
"\n",
" def __init__(self, logger, ports, vlans, eth_src_table, eth_dst_table,\n",
" learn_timeout, learn_jitter, learn_ban_timeout, low_priority, host_priority):\n",
" self.logger = logger\n",
" self.ports = ports\n",
" self.vlans = vlans\n",
" self.eth_src_table = eth_src_table\n",
" self.eth_dst_table = eth_dst_table\n",
" self.learn_timeout = learn_timeout\n",
" self.learn_jitter = learn_jitter\n",
" self.learn_ban_timeout = learn_ban_timeout\n",
" self.low_priority = low_priority\n",
" self.host_priority = host_priority\n",
"\n",
" def ban_rules(self, pkt_meta):\n",
" \"\"\"Limit learning to a maximum configured on this port/VLAN.\n",
"\n",
" Args:\n",
" pkt_meta: PacketMeta instance.\n",
" Returns:\n",
" list: OpenFlow messages, if any.\n",
" \"\"\"\n",
" ofmsgs = []\n",
"\n",
" port = pkt_meta.port\n",
" eth_src = pkt_meta.eth_src\n",
" vlan = pkt_meta.vlan\n",
"\n",
" entry = vlan.cached_host(eth_src)\n",
" if entry is None:\n",
" if port.max_hosts:\n",
" if port.hosts_count() == port.max_hosts:\n",
" ofmsgs.append(self._temp_ban_host_learning(self.eth_src_table.match(in_port=port.number)))\n",
" port.dyn_learn_ban_count += 1\n",
" self.logger.info(\n",
" 'max hosts %u reached on %s, '\n",
" 'temporarily banning learning on this port, '\n",
" 'and not learning %s' % (\n",
" port.max_hosts, port, eth_src))\n",
" if vlan is not None and vlan.max_hosts:\n",
" hosts_count = vlan.hosts_count()\n",
" if hosts_count == vlan.max_hosts:\n",
" ofmsgs.append(self._temp_ban_host_learning(self.eth_src_table.match(vlan=vlan)))\n",
" vlan.dyn_learn_ban_count += 1\n",
" self.logger.info(\n",
" 'max hosts %u reached on VLAN %u, '\n",
" 'temporarily banning learning on this vlan, '\n",
" 'and not learning %s on %s' % (\n",
" vlan.max_hosts, vlan.vid, eth_src, port))\n",
" return ofmsgs\n",
"\n",
" def _temp_ban_host_learning(self, match):\n",
" return self.eth_src_table.flowdrop(\n",
" match,\n",
" priority=(self.low_priority + 1),\n",
" hard_timeout=self.learn_ban_timeout)\n",
"\n",
" def delete_host_from_vlan(self, eth_src, vlan):\n",
" \"\"\"Delete a host from a VLAN.\"\"\"\n",
" ofmsgs = []\n",
" ofmsgs.extend(self.eth_src_table.flowdel(\n",
" self.eth_src_table.match(vlan=vlan, eth_src=eth_src)))\n",
" ofmsgs.extend(self.eth_dst_table.flowdel(\n",
" self.eth_dst_table.match(vlan=vlan, eth_dst=eth_src)))\n",
" return ofmsgs\n",
"\n",
" def expire_hosts_from_vlan(self, vlan, now):\n",
" \"\"\"Expire hosts from VLAN cache.\"\"\"\n",
" expired_hosts = vlan.expire_cache_hosts(now, self.learn_timeout)\n",
" if expired_hosts:\n",
" vlan.dyn_last_time_hosts_expired = now\n",
" self.logger.info(\n",
" '%u recently active hosts on VLAN %u, expired %s' % (\n",
" vlan.hosts_count(), vlan.vid, expired_hosts))\n",
" return expired_hosts\n",
"\n",
" def _jitter_learn_timeout(self):\n",
" \"\"\"Calculate jittered learning timeout to avoid synchronized host timeouts.\"\"\"\n",
" return int(max(abs(\n",
" self.learn_timeout -\n",
" (self.learn_jitter / 2) + random.randint(0, self.learn_jitter)),\n",
" self.CACHE_UPDATE_GUARD_TIME))\n",
"\n",
" def learn_host_timeouts(self, port):\n",
" \"\"\"Calculate flow timeouts for learning on a port.\"\"\"\n",
" # hosts learned on this port never relearned\n",
" if port.permanent_learn:\n",
" learn_timeout = 0\n",
" else:\n",
" learn_timeout = self.learn_timeout\n",
" if self.learn_timeout:\n",
" learn_timeout = self._jitter_learn_timeout()\n",
"\n",
" # Update datapath to no longer send packets from this mac to controller\n",
" # note the use of hard_timeout here and idle_timeout for the dst table\n",
" # this is to ensure that the source rules will always be deleted before\n",
" # any rules on the dst table. Otherwise if the dst table rule expires\n",
" # but the src table rule is still being hit intermittantly the switch\n",
" # will flood packets to that dst and not realise it needs to relearn\n",
" # the rule\n",
" # NB: Must be lower than highest priority otherwise it can match\n",
" # flows destined to controller\n",
" src_rule_idle_timeout = 0\n",
" src_rule_hard_timeout = learn_timeout\n",
" dst_rule_idle_timeout = learn_timeout\n",
" return (src_rule_idle_timeout, src_rule_hard_timeout, dst_rule_idle_timeout)\n",
"\n",
" def learn_host_on_vlan_port_flows(self, port, vlan, eth_src, delete_existing,\n",
" src_rule_idle_timeout, src_rule_hard_timeout,\n",
" dst_rule_idle_timeout):\n",
" \"\"\"Return flows that implement learning a host on a port.\"\"\"\n",
" ofmsgs = []\n",
"\n",
" if port.permanent_learn:\n",
" # Antispoofing rule for this MAC.\n",
" ofmsgs.append(self.eth_src_table.flowdrop(\n",
" self.eth_src_table.match(vlan=vlan, eth_src=eth_src),\n",
" priority=(self.host_priority - 2)))\n",
" else:\n",
" # Delete any existing entries for MAC.\n",
" # TODO: for LAGs, don't delete entries in the same LAG.\n",
" if delete_existing:\n",
" ofmsgs.extend(self.delete_host_from_vlan(eth_src, vlan))\n",
"\n",
" # Associate this MAC with source port.\n",
" src_match = self.eth_src_table.match(\n",
" in_port=port.number, vlan=vlan, eth_src=eth_src)\n",
" if port.override_output_port:\n",
" ofmsgs.append(self.eth_src_table.flowmod(\n",
" match=src_match,\n",
" priority=(self.host_priority - 1),\n",
" inst=[valve_of.apply_actions([\n",
" valve_of.output_port(port.override_output_port.number)])],\n",
" hard_timeout=src_rule_hard_timeout,\n",
" idle_timeout=src_rule_idle_timeout))\n",
" else:\n",
" ofmsgs.append(self.eth_src_table.flowmod(\n",
" match=src_match,\n",
" priority=(self.host_priority - 1),\n",
" inst=[valve_of.goto_table(self.eth_dst_table)],\n",
" hard_timeout=src_rule_hard_timeout,\n",
" idle_timeout=src_rule_idle_timeout))\n",
"\n",
" # Output packets for this MAC to specified port.\n",
" ofmsgs.append(self.eth_dst_table.flowmod(\n",
" self.eth_dst_table.match(vlan=vlan, eth_dst=eth_src),\n",
" priority=self.host_priority,\n",
" inst=[valve_of.apply_actions(vlan.output_port(port))],\n",
" idle_timeout=dst_rule_idle_timeout))\n",
"\n",
" # If port is in hairpin mode, install a special rule\n",
" # that outputs packets destined to this MAC back out the same\n",
" # port they came in (e.g. multiple hosts on same WiFi AP,\n",
" # and FAUCET is switching between them on the same port).\n",
" if port.hairpin:\n",
" ofmsgs.append(self.eth_dst_table.flowmod(\n",
" self.eth_dst_table.match(in_port=port.number, vlan=vlan, eth_dst=eth_src),\n",
" priority=(self.host_priority + 1),\n",
" inst=[valve_of.apply_actions(vlan.output_port(port, hairpin=True))],\n",
" idle_timeout=dst_rule_idle_timeout))\n",
"\n",
" return ofmsgs\n",
"\n",
" def learn_host_on_vlan_ports(self, now, port, vlan, eth_src,\n",
" delete_existing=True,\n",
" last_dp_coldstart_time=None):\n",
" \"\"\"Learn a host on a port.\"\"\"\n",
" ofmsgs = []\n",
" cache_port = None\n",
" cache_age = None\n",
" entry = vlan.cached_host(eth_src)\n",
" # Host not cached, and no hosts expired since we cold started\n",
" # Enable faster learning by assuming there's no previous host to delete\n",
" if entry is None:\n",
" if (last_dp_coldstart_time and\n",
" (vlan.dyn_last_time_hosts_expired is None or\n",
" vlan.dyn_last_time_hosts_expired < last_dp_coldstart_time)):\n",
" delete_existing = False\n",
" else:\n",
" cache_age = now - entry.cache_time\n",
" cache_port = entry.port\n",
"\n",
" if cache_port == port:\n",
" # skip delete if host didn't change ports.\n",
" delete_existing = False\n",
" # if we very very recently learned this host, don't do anything.\n",
" if cache_age < self.CACHE_UPDATE_GUARD_TIME:\n",
" return (ofmsgs, cache_port)\n",
"\n",
" if port.loop_protect:\n",
" ban_age = None\n",
" learn_ban = False\n",
"\n",
" # if recently in loop protect mode and still receiving packets,\n",
" # prolong the ban\n",
" if port.dyn_last_ban_time:\n",
" ban_age = now - port.dyn_last_ban_time\n",
" if ban_age < self.CACHE_UPDATE_GUARD_TIME:\n",
" learn_ban = True\n",
"\n",
" # if not in protect mode and we get a rapid move, enact protect mode\n",
" if not learn_ban and entry is not None:\n",
" if port != cache_port and cache_age < self.CACHE_UPDATE_GUARD_TIME:\n",
" learn_ban = True\n",
" port.dyn_learn_ban_count += 1\n",
" self.logger.info('rapid move of %s from %s to %s, temp loop ban %s' % (\n",
" eth_src, cache_port, port, port))\n",
"\n",
" # already, or newly in protect mode, apply the ban rules.\n",
" if learn_ban:\n",
" port.dyn_last_ban_time = now\n",
" ofmsgs.append(self._temp_ban_host_learning(self.eth_src_table.match(in_port=port.number)))\n",
" return (ofmsgs, cache_port)\n",
"\n",
" (src_rule_idle_timeout,\n",
" src_rule_hard_timeout,\n",
" dst_rule_idle_timeout) = self.learn_host_timeouts(port)\n",
"\n",
" ofmsgs.extend(self.learn_host_on_vlan_port_flows(\n",
" port, vlan, eth_src, delete_existing,\n",
" src_rule_idle_timeout, src_rule_hard_timeout,\n",
" dst_rule_idle_timeout))\n",
"\n",
" vlan.add_cache_host(eth_src, port, now)\n",
" return (ofmsgs, cache_port)\n",
"\n",
" def flow_timeout(self, _now, _table_id, _match):\n",
" \"\"\"Handle a flow timed out message from dataplane.\"\"\"\n",
" return []\n",
"\n",
"\n",
"class ValveHostFlowRemovedManager(ValveHostManager):\n",
" \"\"\"Trigger relearning on flow removed notifications.\n",
"\n",
" .. note::\n",
"\n",
" not currently reliable.\n",
" \"\"\"\n",
"\n",
" def flow_timeout(self, now, table_id, match):\n",
" ofmsgs = []\n",
" if table_id in (self.eth_src_table.table_id, self.eth_dst_table.table_id):\n",
" if 'vlan_vid' in match:\n",
" vlan = self.vlans[valve_of.devid_present(match['vlan_vid'])]\n",
" in_port = None\n",
" eth_src = None\n",
" eth_dst = None\n",
" for field, value in list(match.items()):\n",
" if field == 'in_port':\n",
" in_port = value\n",
" elif field == 'eth_src':\n",
" eth_src = value\n",
" elif field == 'eth_dst':\n",
" eth_dst = value\n",
" if eth_src and in_port:\n",
" port = self.ports[in_port]\n",
" ofmsgs.extend(self._src_rule_expire(vlan, port, eth_src))\n",
" elif eth_dst:\n",
" ofmsgs.extend(self._dst_rule_expire(now, vlan, eth_dst))\n",
" return ofmsgs\n",
"\n",
" def expire_hosts_from_vlan(self, _vlan, _now):\n",
" return []\n",
"\n",
" def learn_host_timeouts(self, port):\n",
" \"\"\"Calculate flow timeouts for learning on a port.\"\"\"\n",
" # hosts learned on this port never relearned\n",
" if port.permanent_learn:\n",
" learn_timeout = 0\n",
" else:\n",
" learn_timeout = self._jitter_learn_timeout()\n",
"\n",
" # Disable hard_time, dst rule expires after src rule.\n",
" src_rule_idle_timeout = learn_timeout\n",
" src_rule_hard_timeout = 0\n",
" dst_rule_idle_timeout = learn_timeout + self.CACHE_UPDATE_GUARD_TIME\n",
" return (src_rule_idle_timeout, src_rule_hard_timeout, dst_rule_idle_timeout)\n",
"\n",
" def _src_rule_expire(self, vlan, port, eth_src):\n",
" \"\"\"When a src rule expires, the host is probably inactive or active in\n",
" receiving but not sending. We mark just mark the host as expired.\"\"\"\n",
" ofmsgs = []\n",
" entry = vlan.cached_host_on_port(eth_src, port)\n",
" if entry is not None:\n",
" vlan.expire_cache_host(eth_src)\n",
" self.logger.info('expired src_rule for host %s' % eth_src)\n",
" return ofmsgs\n",
"\n",
" def _dst_rule_expire(self, now, vlan, eth_dst):\n",
" \"\"\"Expiring a dst rule may indicate that the host is actively sending\n",
" traffic but not receving. If the src rule not yet expires, we reinstall\n",
" host rules.\"\"\"\n",
" ofmsgs = []\n",
" entry = vlan.cached_host(eth_dst)\n",
" if entry is not None:\n",
" ofmsgs.extend(self.learn_host_on_vlan_ports(\n",
" now, entry.port, vlan, eth_dst, delete_existing=False))\n",
" self.logger.info(\n",
" 'refreshing host %s from VLAN %u' % (eth_dst, vlan.vid))\n",
" return ofmsgs\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0.012195121951219513,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0.011904761904761904,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0.009345794392523364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 329 | 0.000547 |
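The learning timeouts in the entry above hinge on _jitter_learn_timeout, which spreads host expirations so that hosts learned at the same moment do not all age out together and never reports a value below the 2-second cache-update guard. A standalone restatement of that calculation, for illustration only:

# Standalone restatement of the jitter calculation mirrored from the class above.
import random

CACHE_UPDATE_GUARD_TIME = 2  # seconds; same guard value as the class constant

def jitter_learn_timeout(learn_timeout, learn_jitter):
    jittered = learn_timeout - (learn_jitter / 2) + random.randint(0, learn_jitter)
    return int(max(abs(jittered), CACHE_UPDATE_GUARD_TIME))

# e.g. a 300 s timeout with 10 s of jitter lands somewhere in [295, 305]
print(jitter_learn_timeout(300, 10))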
#!/usr/bin/python
# encoding=utf-8
#!coding:utf-8
import re
import sys
import urllib2
import argparse
import commands
import os
import subprocess
if __name__ == "__main__" :
# Test the regular expression
reload(sys)
sys.setdefaultencoding("utf-8")
if len(sys.argv) > 1: # if command-line arguments were passed at program start
# print information about the command-line arguments that were passed
print "You entered %d arguments in total; sys.argv = %s" % (len(sys.argv), sys.argv)
for i, eachArg in enumerate(sys.argv):
print "[%d] = %s" % (i, eachArg)
else:
print "Useage : read.py -r shell..."
exit(0)
# create a parser object
# then add the command-line arguments and options you care about to it
# each add_argument call corresponds to one argument or option of interest
# finally call parse_args() to do the parsing
# once parsing succeeds the values can be used
parser = argparse.ArgumentParser( )
parser.add_argument("-r", "--run", dest = "shell_parser", help = "The file you want to read...")
args = parser.parse_args( )
shell = args.shell_parser
try:
# first run the shell script and read back its output
#(status, output) = commands.getstatusoutput("ls")
#print status, output
#output = os.popen('ls')
#print output.read()
#print subprocess.call(["ls","-al"])
#handle = subprocess.Popen("ls", shell=True, stdout=subprocess.PIPE)
#print handle.communicate()[0]
output = subprocess.check_output("ls")
#print all_text.decode("utf-8")
#print all_text
# the text to match looks like this
output = """
[ 2016-1-26 23:25:52]Process 18497 exited with code 0
[ 2016-1-26 23:26:5]Process 18556 termed with signal 11(SIGSEGV)"""
reStr = r'.*?(\d{4}-\d{1,2}-\d{1,2} \d{1,2}:\d{1,2}:\d{1,2})]Process (\d{1,5}) (exited with code \d|termed with signal \d{1,2}\((.*?)\))'
#item[0] -=> [ 2016-1-26 23:25:52]
#item[1] -=> pid
#item[2] -=> Process 18497 exited with code 0 | Process 18556 termed with signal 11(SIGSEGV)
#item[3] -=> "" | SIGSEGV
non_exception = 0
sigsegv_exception = 0
pattern = re.compile(reStr, re.S)
myItems = re.findall(pattern, output)
print len(myItems)
#print myItems
for item in myItems:
#print item
if item[3] == "" :
non_exception += 1
elif item[3] == "SIGSEGV":
sigsegv_exception += 1
print "no exception", non_exception
print "SIGSEGV", sigsegv_exception
if non_exception == 1 :
print "0"
else :
print "1", item[3]
finally:
pass
| [
"#!/usr/bin/python\r\n",
"# encoding=utf-8\r\n",
"\r\n",
"\r\n",
"#!coding:utf-8\r\n",
"\r\n",
"import re\r\n",
"import sys\r\n",
"import urllib2\r\n",
"import argparse\r\n",
"\r\n",
"import commands\r\n",
"import os\r\n",
"import subprocess\r\n",
"\r\n",
"if __name__ == \"__main__\" :\r\n",
" # 测试正则表达式\r\n",
"\r\n",
" reload(sys)\r\n",
" sys.setdefaultencoding(\"utf-8\")\r\n",
" if len(sys.argv) > 1: # 如果在程序运行时,传递了命令行参数\r\n",
"\r\n",
" # 打印传递的命令行参数的信息\r\n",
" print \"您输入的所有参数共 %d 个,信息为 sys.argv = %s\" % (len(sys.argv), sys.argv)\r\n",
"\r\n",
" for i, eachArg in enumerate(sys.argv):\r\n",
"\r\n",
" print \"[%d] = %s\" % (i, eachArg)\r\n",
" else:\r\n",
" \tprint \"Useage : read.py -r shell...\"\r\n",
" \texit(0)\r\n",
"\r\n",
" # 创建一个解析对象\r\n",
" # 然后向该对象中添加你要关注的命令行参数和选项\r\n",
" # 每一个add_argument方法对应一个你要关注的参数或选项\r\n",
" # 最后调用parse_args()方法进行解析\r\n",
" # 解析成功之后即可使用\r\n",
"\r\n",
" parser = argparse.ArgumentParser( )\r\n",
" parser.add_argument(\"-r\", \"--run\", dest = \"shell_parser\", help = \"The file you want to read...\")\r\n",
" args = parser.parse_args( )\r\n",
"\r\n",
" shell = args.shell_parser\r\n",
"\r\n",
"\r\n",
"\r\n",
" try:\r\n",
" # 首先执行shell脚本,并读取到返回值的信息\r\n",
" #(status, output) = commands.getstatusoutput(\"ls\")\r\n",
" #print status, output\r\n",
"\r\n",
" #output = os.popen('ls')\r\n",
" #print output.read()\r\n",
"\r\n",
" #print subprocess.call([\"ls\",\"-al\"])\r\n",
"\r\n",
" #handle = subprocess.Popen(\"ls\", shell=True, stdout=subprocess.PIPE)\r\n",
" #print handle.communicate()[0]\r\n",
" output = subprocess.check_output(\"ls\")\r\n",
"\r\n",
" #print all_text.decode(\"utf-8\")\r\n",
" #print all_text\r\n",
"\r\n",
" #匹配的信息如下\r\n",
" output = \"\"\"\r\n",
" [ 2016-1-26 23:25:52]Process 18497 exited with code 0\r\n",
" [ 2016-1-26 23:26:5]Process 18556 termed with signal 11(SIGSEGV)\"\"\"\r\n",
"\r\n",
" reStr = r'.*?(\\d{4}-\\d{1,2}-\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2})]Process (\\d{1,5}) (exited with code \\d|termed with signal \\d{1,2}\\((.*?)\\))'\r\n",
" #item[0] -=> [ 2016-1-26 23:25:52]\r\n",
" #item[1] -=> pid\r\n",
" #item[2] -=> Process 18497 exited with code 0 | Process 18556 termed with signal 11(SIGSEGV)\r\n",
" #item[3] -=> \"\" | SIGSEGV\r\n",
"\r\n",
" non_exception = 0\r\n",
" sigsegv_exception = 0\r\n",
" pattern = re.compile(reStr, re.S)\r\n",
" myItems = re.findall(pattern, output)\r\n",
" print len(myItems)\r\n",
" #print myItems\r\n",
" for item in myItems:\r\n",
" \t#print item\r\n",
" \tif item[3] == \"\" :\r\n",
" \t\tnon_exception += 1\r\n",
" \telif item[3] == \"SIGSEGV\":\r\n",
" \t\tsigsegv_exception += 1\r\n",
"\r\n",
" print \"无异常\", non_exception\r\n",
" print \"SIGSEGV\", sigsegv_exception\r\n",
"\r\n",
" if non_exception == 1 :\r\n",
" print \"0\"\r\n",
" else :\r\n",
" print \"1\", item[3]\r\n",
" finally:\r\n",
" \tpass\r\n"
] | [
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0,
0,
0,
0.046511627906976744,
0.14285714285714285,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0.049019607843137254,
0.030303030303030304,
0,
0,
0,
0,
0,
0.1,
0,
0.016666666666666666,
0.03225806451612903,
0,
0.029411764705882353,
0.03333333333333333,
0,
0.021739130434782608,
0,
0.01282051282051282,
0.025,
0,
0,
0.024390243902439025,
0.04,
0,
0.05555555555555555,
0,
0,
0,
0,
0.006802721088435374,
0.022727272727272728,
0.038461538461538464,
0.0196078431372549,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0.18181818181818182,
0.13793103448275862,
0.1,
0.05405405405405406,
0.08823529411764706,
0,
0,
0,
0,
0.030303030303030304,
0,
0.0625,
0,
0,
0.18181818181818182
] | 96 | 0.018665 |
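The core of the script above is the regular expression that classifies each "Process ..." log line as a clean exit or a signal-terminated crash. Below is a small Python 3 sketch of the same matching logic, using the sample log lines from the script; the loop and variable names are mine, not part of the original file.

import re

log = """
[ 2016-1-26 23:25:52]Process 18497 exited with code 0
[ 2016-1-26 23:26:5]Process 18556 termed with signal 11(SIGSEGV)"""

# Same structure as the script's reStr: timestamp, pid, outcome, optional signal name.
pattern = re.compile(
    r'.*?(\d{4}-\d{1,2}-\d{1,2} \d{1,2}:\d{1,2}:\d{1,2})\]'
    r'Process (\d{1,5}) (exited with code \d|termed with signal \d{1,2}\((.*?)\))',
    re.S)

for timestamp, pid, outcome, signal_name in pattern.findall(log):
    print(timestamp, pid, outcome, signal_name or '(clean exit)')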
"""
Created on 10 Jan 2017
@author: Bruno Beloff ([email protected])
https://www.nmea.org
https://en.wikipedia.org/wiki/NMEA_0183
reference coordinate systems:
https://en.wikipedia.org/wiki/World_Geodetic_System#WGS84
https://en.wikipedia.org/wiki/PZ-90
"""
from collections import OrderedDict
from numbers import Number
from scs_core.data.datum import Datum
from scs_core.data.json import JSONReport
from scs_core.position.position import Position
# --------------------------------------------------------------------------------------------------------------------
class GPSDatum(JSONReport):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict, default=True):
if not jdict:
return cls.null_datum()
pos = Position.construct_from_jdict(jdict.get('pos'))
elv = jdict.get('elv')
quality = jdict.get('qual')
return GPSDatum(pos, elv, quality)
@classmethod
def construct_from_gga(cls, gga):
if gga is None:
return None
pos = Position.construct_from_gga(gga)
elv = None if gga.alt is None else round(gga.alt)
quality = gga.quality
return GPSDatum(pos, elv, quality)
@classmethod
def null_datum(cls):
return GPSDatum(Position(None, None), None, None)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, pos, elv, quality):
"""
Constructor
"""
self.__pos = pos # Position
self.__elv = Datum.float(elv, 1) # metres above mean sea level
self.__quality = quality # number or None
# ----------------------------------------------------------------------------------------------------------------
# Support for averaging...
def __add__(self, other):
if not isinstance(other, self.__class__):
raise TypeError(other)
pos = self.pos + other.pos
elv = self.elv + other.elv
quality = self.quality + other.quality
return GPSDatum(pos, elv, quality)
def __truediv__(self, other):
if not isinstance(other, Number):
raise TypeError(other)
pos = self.pos / other
elv = self.elv / other
quality = self.quality / other
return GPSDatum(pos, elv, quality)
# ----------------------------------------------------------------------------------------------------------------
def distance(self, other_pos, minimum_acceptable_quality=None):
if self.pos is None:
return None
if minimum_acceptable_quality is not None:
if self.quality is None or round(self.quality) < minimum_acceptable_quality:
return None
return self.pos.distance(other_pos)
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['pos'] = self.pos
jdict['elv'] = None if self.elv is None else round(self.elv, 1)
jdict['qual'] = None if self.quality is None else round(self.quality, 1)
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def pos(self):
return self.__pos
@property
def elv(self):
return self.__elv
@property
def quality(self):
return self.__quality
@quality.setter
def quality(self, quality):
self.__quality = quality
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "GPSDatum:{pos:%s, elv:%s, quality:%s}" % (self.pos, self.elv, self.quality)
| [
"\"\"\"\n",
"Created on 10 Jan 2017\n",
"\n",
"@author: Bruno Beloff ([email protected])\n",
"\n",
"https://www.nmea.org\n",
"https://en.wikipedia.org/wiki/NMEA_0183\n",
"\n",
"reference coordinate systems:\n",
"https://en.wikipedia.org/wiki/World_Geodetic_System#WGS84\n",
"https://en.wikipedia.org/wiki/PZ-90\n",
"\"\"\"\n",
"\n",
"from collections import OrderedDict\n",
"from numbers import Number\n",
"\n",
"from scs_core.data.datum import Datum\n",
"from scs_core.data.json import JSONReport\n",
"\n",
"from scs_core.position.position import Position\n",
"\n",
"\n",
"# --------------------------------------------------------------------------------------------------------------------\n",
"\n",
"class GPSDatum(JSONReport):\n",
" \"\"\"\n",
" classdocs\n",
" \"\"\"\n",
"\n",
" # ----------------------------------------------------------------------------------------------------------------\n",
"\n",
" @classmethod\n",
" def construct_from_jdict(cls, jdict, default=True):\n",
" if not jdict:\n",
" return cls.null_datum()\n",
"\n",
" pos = Position.construct_from_jdict(jdict.get('pos'))\n",
" elv = jdict.get('elv')\n",
"\n",
" quality = jdict.get('qual')\n",
"\n",
" return GPSDatum(pos, elv, quality)\n",
"\n",
"\n",
" @classmethod\n",
" def construct_from_gga(cls, gga):\n",
" if gga is None:\n",
" return None\n",
"\n",
" pos = Position.construct_from_gga(gga)\n",
" elv = None if gga.alt is None else round(gga.alt)\n",
"\n",
" quality = gga.quality\n",
"\n",
" return GPSDatum(pos, elv, quality)\n",
"\n",
"\n",
" @classmethod\n",
" def null_datum(cls):\n",
" return GPSDatum(Position(None, None), None, None)\n",
"\n",
"\n",
" # ----------------------------------------------------------------------------------------------------------------\n",
"\n",
" def __init__(self, pos, elv, quality):\n",
" \"\"\"\n",
" Constructor\n",
" \"\"\"\n",
" self.__pos = pos # Position\n",
" self.__elv = Datum.float(elv, 1) # metres above mean sea level\n",
"\n",
" self.__quality = quality # number or None\n",
"\n",
"\n",
" # ----------------------------------------------------------------------------------------------------------------\n",
" # Support for averaging...\n",
"\n",
" def __add__(self, other):\n",
" if not isinstance(other, self.__class__):\n",
" raise TypeError(other)\n",
"\n",
" pos = self.pos + other.pos\n",
" elv = self.elv + other.elv\n",
"\n",
" quality = self.quality + other.quality\n",
"\n",
" return GPSDatum(pos, elv, quality)\n",
"\n",
"\n",
" def __truediv__(self, other):\n",
" if not isinstance(other, Number):\n",
" raise TypeError(other)\n",
"\n",
" pos = self.pos / other\n",
" elv = self.elv / other\n",
"\n",
" quality = self.quality / other\n",
"\n",
" return GPSDatum(pos, elv, quality)\n",
"\n",
"\n",
" # ----------------------------------------------------------------------------------------------------------------\n",
"\n",
" def distance(self, other_pos, minimum_acceptable_quality=None):\n",
" if self.pos is None:\n",
" return None\n",
"\n",
" if minimum_acceptable_quality is not None:\n",
" if self.quality is None or round(self.quality) < minimum_acceptable_quality:\n",
" return None\n",
"\n",
" return self.pos.distance(other_pos)\n",
"\n",
"\n",
" # ----------------------------------------------------------------------------------------------------------------\n",
"\n",
" def as_json(self):\n",
" jdict = OrderedDict()\n",
"\n",
" jdict['pos'] = self.pos\n",
" jdict['elv'] = None if self.elv is None else round(self.elv, 1)\n",
"\n",
" jdict['qual'] = None if self.quality is None else round(self.quality, 1)\n",
"\n",
" return jdict\n",
"\n",
"\n",
" # ----------------------------------------------------------------------------------------------------------------\n",
"\n",
" @property\n",
" def pos(self):\n",
" return self.__pos\n",
"\n",
"\n",
" @property\n",
" def elv(self):\n",
" return self.__elv\n",
"\n",
"\n",
" @property\n",
" def quality(self):\n",
" return self.__quality\n",
"\n",
"\n",
" @quality.setter\n",
" def quality(self, quality):\n",
" self.__quality = quality\n",
"\n",
"\n",
" # ----------------------------------------------------------------------------------------------------------------\n",
"\n",
" def __str__(self, *args, **kwargs):\n",
" return \"GPSDatum:{pos:%s, elv:%s, quality:%s}\" % (self.pos, self.elv, self.quality)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0.008403361344537815,
0,
0.023255813953488372,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.008403361344537815,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008403361344537815,
0,
0.014705882352941176,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0.008403361344537815,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0.008403361344537815,
0,
0.07142857142857142,
0,
0,
0,
0,
0.07142857142857142,
0,
0,
0,
0,
0.07142857142857142,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0.008403361344537815,
0,
0.025,
0.010869565217391304
] | 153 | 0.004236 |
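GPSDatum's __add__ and __truediv__ overloads exist so several fixes can be summed and then divided to produce an average reading. The sketch below shows that pattern with a simplified stand-in class so it runs without the scs_core package; Reading and its fields are invented for illustration only.

class Reading(object):
    """Minimal stand-in for GPSDatum, just to show the add/divide averaging pattern."""

    def __init__(self, elv, quality):
        self.elv = elv
        self.quality = quality

    def __add__(self, other):
        return Reading(self.elv + other.elv, self.quality + other.quality)

    def __truediv__(self, other):
        return Reading(self.elv / other, self.quality / other)

    def __repr__(self):
        return "Reading(elv=%.1f, quality=%.1f)" % (self.elv, self.quality)

samples = [Reading(102.0, 1), Reading(104.0, 2), Reading(103.0, 2)]

total = samples[0]
for sample in samples[1:]:
    total = total + sample

print(total / len(samples))  # Reading(elv=103.0, quality=1.7)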
"""Plugs are ins and outs for Nodes through which they exchange data."""
from __future__ import print_function
import sys
import warnings
from abc import abstractmethod
from .utilities import get_hash
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
if sys.version_info.major > 2: # pragma: no cover
basestring = str
class IPlug(object):
"""The interface for the plugs.
Plugs are associated with a Node and can be connected, disconnected
and hold a value that can be accessed by the associated Node.
"""
def __init__(self, name, node):
"""Initialize the Interface.
Args:
name (str): The name of the Plug.
node (INode): The Node holding the Plug.
"""
if '.' in name and not isinstance(self, SubPlug):
raise ValueError(
'Names for plugs can not contain dots "." as these are '
'reserved to identify sub plugs.')
self.name = name
self.node = node
self.connections = []
self._sub_plugs = OrderedDict()
self._value = None
self._is_dirty = True
def __rshift__(self, other):
"""Create a connection to the given IPlug.
Args:
other (IPlug): The IPlug to connect to.
"""
warnings.warn("Use the connect method instead",
DeprecationWarning, stacklevel=2)
self.connect(other)
def __lshift__(self, other):
"""Break a connection to the given IPlug.
Args:
other (IPlug): The IPlug to disconnect.
"""
warnings.warn("Use the disconnect method instead",
DeprecationWarning, stacklevel=2)
self.disconnect(other)
# Extra function to make re-use in subclasses easier
def _update_value(self, value):
"""Update the internal value."""
old_hash = get_hash(self._value)
new_hash = get_hash(value)
self._value = value
if old_hash is None or new_hash is None or (old_hash != new_hash):
self.is_dirty = True
@property
def value(self):
"""Access to the value on this Plug."""
if self._sub_plugs:
return {name: plug.value for name, plug in self._sub_plugs.items()}
return self._value
@value.setter
def value(self, value):
"""Set the Plug dirty when the value is being changed."""
self._update_value(value)
@property
def is_dirty(self):
"""Access to the dirty status on this Plug."""
if self._sub_plugs:
for sub_plug in self._sub_plugs.values():
if sub_plug.is_dirty:
return True
return False
else:
return self._is_dirty
@is_dirty.setter
def is_dirty(self, status):
"""Setting the Plug dirty informs the node this Plug belongs to."""
self._is_dirty = status
if status:
self.node.on_input_plug_set_dirty()
@abstractmethod
def connect(self, plug): # pragma: no cover
"""Has to be implemented in the subclass."""
raise NotImplementedError("The subclass has to define connect()")
def disconnect(self, plug):
"""Break the connection to the given Plug."""
if isinstance(plug, InputPlugGroup):
for plug_ in plug:
self.disconnect(plug_)
return
if plug in self.connections:
self.connections.pop(self.connections.index(plug))
self.is_dirty = True
if self in plug.connections:
plug.connections.pop(plug.connections.index(self))
plug.is_dirty = True
def promote_to_graph(self, name=None):
"""Add this plug to the graph of this plug's node.
Args:
name (str): Optionally provide a different name for the Plug
"""
self.node.graph.add_plug(self, name=name)
class OutputPlug(IPlug):
"""Provides data to an InputPlug."""
def __init__(self, name, node, accepted_plugs=None):
"""Initialize the OutputPlug.
Can be connected to an InputPlug.
Args:
name (str): The name of the Plug.
node (INode): The Node holding the Plug.
"""
self.accepted_plugs = (InputPlug, InputPlugGroup)
super(OutputPlug, self).__init__(name, node)
if not isinstance(self, SubPlug):
self.node.outputs[self.name] = self
def __rshift__(self, other):
"""Syntactic sugar for the connect() method.
If `other` is an INode with an input matching this plug's name, connect.
"""
# softly check if the "other" is a Node with inputs
if hasattr(other, "inputs"):
for iname, iplug in other.inputs.items():
if iname == self.name:
target = iplug
else:
target = other
self.connect(target)
def connect(self, plug):
"""Connect this Plug to the given InputPlug.
Set both participating Plugs dirty.
"""
if not isinstance(plug, self.accepted_plugs):
raise TypeError("Cannot connect {0} to {1}".format(
type(self), type(plug)))
if isinstance(plug, InputPlugGroup):
for plug_ in plug:
self.connect(plug_)
return
if self.node.graph.accepts_connection(self, plug):
for connection in plug.connections:
plug.disconnect(connection)
if plug not in self.connections:
self.connections.append(plug)
plug.value = self.value
self.is_dirty = True
plug.is_dirty = True
if self not in plug.connections:
plug.connections = [self]
plug.is_dirty = True
def __getitem__(self, key):
"""Retrieve a sub plug by key.
If it does not exist yet, it is created automatically!
Args:
key (str): The name of the sub plug
"""
if not isinstance(key, basestring):
raise TypeError(
'Only strings are allowed as sub-plug keys! '
'This is due to the fact that JSON serialization only allows '
'strings as keys.')
if not self._sub_plugs.get(key):
self._sub_plugs[key] = SubOutputPlug(
key=key,
node=self.node,
parent_plug=self)
return self._sub_plugs[key]
def _update_value(self, value):
"""Propagate the dirty state to all connected Plugs as well."""
super(OutputPlug, self)._update_value(value)
for plug in self.connections:
plug.value = value
def serialize(self):
"""Serialize the Plug containing all its connections."""
connections = {}
for connection in self.connections:
connections.setdefault(connection.node.identifier, [])
connections[connection.node.identifier].append(connection.name)
return {
'name': self.name,
'value': self.value if not self._sub_plugs else None,
'connections': connections,
'sub_plugs': {
name: sub_plug.serialize()
for name, sub_plug in self._sub_plugs.items()
}
}
class InputPlug(IPlug):
"""Receives data from an OutputPlug."""
def __init__(self, name, node, value=None):
"""Initialize the InputPlug.
Can be connected to an OutputPlug.
Args:
name (str): The name of the Plug.
node (INode): The Node holding the Plug.
"""
self.accepted_plugs = (OutputPlug,)
super(InputPlug, self).__init__(name, node)
self.value = value
self.is_dirty = True
if not isinstance(self, SubPlug):
self.node.inputs[self.name] = self
def connect(self, plug):
"""Connect this Plug to the given OutputPlug.
Set both participating Plugs dirty.
"""
if not isinstance(plug, self.accepted_plugs):
raise TypeError("Cannot connect {0} to {1}".format(
type(self), type(plug)))
plug.connect(self)
def __getitem__(self, key):
"""Retrieve a sub plug by key.
If it does not exist yet, it is created automatically!
Args:
key (str): The name of the sub plug
"""
if not isinstance(key, basestring):
raise TypeError(
'Only strings are allowed as sub-plug keys! '
'This is due to the fact that JSON serialization only allows '
'strings as keys.')
if not self._sub_plugs.get(key):
self._sub_plugs[key] = SubInputPlug(
key=key,
node=self.node,
parent_plug=self)
return self._sub_plugs[key]
def _update_value(self, value):
if self._sub_plugs:
return
super(InputPlug, self)._update_value(value)
def serialize(self):
"""Serialize the Plug containing all its connections."""
connections = {}
if self.connections:
connections[self.connections[0].node.identifier] = self.connections[0].name
return {
'name': self.name,
'value': self.value if not self._sub_plugs else None,
'connections': connections,
'sub_plugs': {
name: sub_plug.serialize()
for name, sub_plug in self._sub_plugs.items()
}
}
class SubPlug(object):
"""Mixin that unifies common properties of subplugs."""
@property
def is_dirty(self):
"""Access to the dirty status on this Plug."""
return self._is_dirty
@is_dirty.setter
def is_dirty(self, status):
"""Setting the Plug dirty informs its parent plug."""
self._is_dirty = status
if status:
self.parent_plug.is_dirty = status
def promote_to_graph(self, name=None):
"""Add this plug to the graph of this plug's node.
NOTE: Subplugs can only be added to a graph via their parent plug.
Args:
name (str): Optionally provide a different name for the Plug
"""
# prevent adding SubPlug to the graph without their parents
raise TypeError(
"Cannot add SubPlug to graph! Add the parent plug instead.")
class SubInputPlug(SubPlug, InputPlug):
"""Held by a parent input plug to form a compound plug."""
def __init__(self, key, node, parent_plug, value=None):
"""Initialize the plug.
Can be connected to an OutputPlug.
Args:
key (str): The key will be used to form the name of the Plug:
{parent_plug.name}.{key}.
node (INode): The Node holding the Plug.
parent_plug (InputPlug): The parent plug holding this Plug.
"""
# super().__init__() refers to self.parent_plug, so need to set it here
self.key = key
self.parent_plug = parent_plug
self.parent_plug._sub_plugs[key] = self
super(SubInputPlug, self).__init__(
'{0}.{1}'.format(parent_plug.name, key), node)
self.value = value
self.is_dirty = True
def serialize(self):
"""Serialize the Plug containing all its connections."""
connections = {}
if self.connections:
connections[self.connections[0].node.identifier] = \
self.connections[0].name
return {
'name': self.name,
'value': self.value,
'connections': connections
}
class SubOutputPlug(SubPlug, OutputPlug):
"""Held by a parent output plug to form a compound plug."""
def __init__(self, key, node, parent_plug, value=None):
"""Initialize the plug.
Can be connected to an InputPlug.
Args:
key (str): The key will be used to form the name of the Plug:
{parent_plug.name}.{key}.
node (INode): The Node holding the Plug.
parent_plug (InputPlug): The parent plug holding this Plug.
"""
# super().__init__() refers to self.parent_plug, so need to set it here
self.key = key
self.parent_plug = parent_plug
self.parent_plug._sub_plugs[key] = self
super(SubOutputPlug, self).__init__(
'{0}.{1}'.format(parent_plug.name, key), node)
self.value = value
self.is_dirty = True
def _update_value(self, value):
"""Propagate the dirty state to all connected Plugs as well."""
super(SubOutputPlug, self)._update_value(value)
for plug in self.connections:
plug.value = value
parent_value = self.parent_plug.value or {}
parent_value[self.key] = value
self.parent_plug.value = parent_value
def serialize(self):
"""Serialize the Plug containing all its connections."""
connections = {}
for connection in self.connections:
connections.setdefault(connection.node.identifier, [])
connections[connection.node.identifier].append(connection.name)
return {
'name': self.name,
'value': self.value,
'connections': connections
}
class InputPlugGroup(object):
"""Group plugs inside a group into one entry point on the graph."""
def __init__(self, name, graph, plugs=None):
"""Initialize the group and assigning it to the `Graph.input_groups`.
Can be connected to an OutputPlug.
Args:
name (str): The name of the InputPlugGroup.
graph (Graph): The Graph holding the PlugGroup.
plugs (list of InputPlug): The plugs in this group.
"""
self.name = name
self.graph = graph
self.plugs = plugs or []
self.graph.inputs[self.name] = self
def connect(self, plug):
"""Connect all plugs in this group to the given plug."""
for input_plug in self.plugs:
plug.connect(input_plug)
def disconnect(self, plug):
"""Disconnect all plugs in this group from the given plug."""
for input_plug in self.plugs:
plug.disconnect(input_plug)
def __iter__(self):
"""Convenience to iterate over the plugs in this group."""
for plug in self.plugs:
yield plug
def __rshift__(self, other):
"""Syntactic sugar for the connect() method."""
self.connect(other)
def __lshift__(self, other):
"""Syntactic sugar for the disconnect() method."""
self.disconnect(other)
@property
def value(self):
"""Getting the value of an InputPlugGroup is not supported.
The value property is implemented nonetheless, in order to allow for
convenient setting of the value of all plugs in the InputPlugGroup.
"""
raise AttributeError(
"Getting the value of an InputPlugGroup is not supported")
@value.setter
def value(self, new_value):
"""Set the value for all grouped plugs."""
for plug in self.plugs:
plug.value = new_value
| [
"\"\"\"Plugs are ins and outs for Nodes through which they exchange data.\"\"\"\n",
"from __future__ import print_function\n",
"\n",
"import sys\n",
"import warnings\n",
"from abc import abstractmethod\n",
"\n",
"from .utilities import get_hash\n",
"\n",
"try:\n",
" from collections import OrderedDict\n",
"except ImportError:\n",
" from ordereddict import OrderedDict\n",
"\n",
"if sys.version_info.major > 2: # pragma: no cover\n",
" basestring = str\n",
"\n",
"\n",
"class IPlug(object):\n",
" \"\"\"The interface for the plugs.\n",
"\n",
" Plugs are associated with a Node and can be connected, disconnected\n",
" and hold a value, that can be accesses by the associated Node.\n",
" \"\"\"\n",
"\n",
" def __init__(self, name, node):\n",
" \"\"\"Initialize the Interface.\n",
"\n",
" Args:\n",
" name (str): The name of the Plug.\n",
" node (INode): The Node holding the Plug.\n",
" \"\"\"\n",
" if '.' in name and not isinstance(self, SubPlug):\n",
" raise ValueError(\n",
" 'Names for plugs can not contain dots \".\" as these are '\n",
" 'reserved to identify sub plugs.')\n",
" self.name = name\n",
" self.node = node\n",
" self.connections = []\n",
" self._sub_plugs = OrderedDict()\n",
" self._value = None\n",
" self._is_dirty = True\n",
"\n",
" def __rshift__(self, other):\n",
" \"\"\"Create a connection to the given IPlug.\n",
"\n",
" Args:\n",
" other (IPlug): The IPlug to connect to.\n",
" \"\"\"\n",
" warnings.warn(\"Use the connect method instead\",\n",
" DeprecationWarning, stacklevel=2)\n",
" self.connect(other)\n",
"\n",
" def __lshift__(self, other):\n",
" \"\"\"Break a connection to the given IPlug.\n",
"\n",
" Args:\n",
" other (IPlug): The IPlug to disconnect.\n",
" \"\"\"\n",
" warnings.warn(\"Use the disconnect method instead\",\n",
" DeprecationWarning, stacklevel=2)\n",
" self.disconnect(other)\n",
"\n",
" # Extra function to make re-use in subclasses easier\n",
" def _update_value(self, value):\n",
" \"\"\"Update the internal value.\"\"\"\n",
" old_hash = get_hash(self._value)\n",
" new_hash = get_hash(value)\n",
" self._value = value\n",
" if old_hash is None or new_hash is None or (old_hash != new_hash):\n",
" self.is_dirty = True\n",
"\n",
" @property\n",
" def value(self):\n",
" \"\"\"Access to the value on this Plug.\"\"\"\n",
" if self._sub_plugs:\n",
" return {name: plug.value for name, plug in self._sub_plugs.items()}\n",
" return self._value\n",
"\n",
" @value.setter\n",
" def value(self, value):\n",
" \"\"\"Set the Plug dirty when the value is being changed.\"\"\"\n",
" self._update_value(value)\n",
"\n",
" @property\n",
" def is_dirty(self):\n",
" \"\"\"Access to the dirty status on this Plug.\"\"\"\n",
" if self._sub_plugs:\n",
" for sub_plug in self._sub_plugs.values():\n",
" if sub_plug.is_dirty:\n",
" return True\n",
" return False\n",
" else:\n",
" return self._is_dirty\n",
"\n",
" @is_dirty.setter\n",
" def is_dirty(self, status):\n",
" \"\"\"Set the Plug dirty informs the node this Plug belongs to.\"\"\"\n",
" self._is_dirty = status\n",
" if status:\n",
" self.node.on_input_plug_set_dirty()\n",
"\n",
" @abstractmethod\n",
" def connect(self, plug): # pragma: no cover\n",
" \"\"\"Has to be implemented in the subclass.\"\"\"\n",
" raise NotImplementedError(\"The subclass has to define connect()\")\n",
"\n",
" def disconnect(self, plug):\n",
" \"\"\"Break the connection to the given Plug.\"\"\"\n",
" if isinstance(plug, InputPlugGroup):\n",
" for plug_ in plug:\n",
" self.disconnect(plug_)\n",
" return\n",
" if plug in self.connections:\n",
" self.connections.pop(self.connections.index(plug))\n",
" self.is_dirty = True\n",
" if self in plug.connections:\n",
" plug.connections.pop(plug.connections.index(self))\n",
" plug.is_dirty = True\n",
"\n",
" def promote_to_graph(self, name=None):\n",
" \"\"\"Add this plug to the graph of this plug's node.\n",
"\n",
" Args:\n",
" name (str): Optionally provide a different name for the Plug\n",
" \"\"\"\n",
" self.node.graph.add_plug(self, name=name)\n",
"\n",
"\n",
"class OutputPlug(IPlug):\n",
" \"\"\"Provides data to an InputPlug.\"\"\"\n",
"\n",
" def __init__(self, name, node, accepted_plugs=None):\n",
" \"\"\"Initialize the OutputPlug.\n",
"\n",
" Can be connected to an InputPlug.\n",
" Args:\n",
" name (str): The name of the Plug.\n",
" node (INode): The Node holding the Plug.\n",
" \"\"\"\n",
" self.accepted_plugs = (InputPlug, InputPlugGroup)\n",
" super(OutputPlug, self).__init__(name, node)\n",
" if not isinstance(self, SubPlug):\n",
" self.node.outputs[self.name] = self\n",
"\n",
" def __rshift__(self, other):\n",
" \"\"\"Syntactic sugar for the connect() method.\n",
"\n",
" If `other` is a INode with an input matching this plug's name, connect.\n",
" \"\"\"\n",
" # softly check if the \"other\" is a Node with inputs\n",
" if hasattr(other, \"inputs\"):\n",
" for iname, iplug in other.inputs.items():\n",
" if iname == self.name:\n",
" target = iplug\n",
" else:\n",
" target = other\n",
" self.connect(target)\n",
"\n",
" def connect(self, plug):\n",
" \"\"\"Connect this Plug to the given InputPlug.\n",
"\n",
" Set both participating Plugs dirty.\n",
" \"\"\"\n",
" if not isinstance(plug, self.accepted_plugs):\n",
" raise TypeError(\"Cannot connect {0} to {1}\".format(\n",
" type(self), type(plug)))\n",
" if isinstance(plug, InputPlugGroup):\n",
" for plug_ in plug:\n",
" self.connect(plug_)\n",
" return\n",
"\n",
" if self.node.graph.accepts_connection(self, plug):\n",
" for connection in plug.connections:\n",
" plug.disconnect(connection)\n",
" if plug not in self.connections:\n",
" self.connections.append(plug)\n",
" plug.value = self.value\n",
" self.is_dirty = True\n",
" plug.is_dirty = True\n",
" if self not in plug.connections:\n",
" plug.connections = [self]\n",
" plug.is_dirty = True\n",
"\n",
" def __getitem__(self, key):\n",
" \"\"\"Retrieve a sub plug by key.\n",
"\n",
" If it does not exist yet, it is created automatically!\n",
" Args:\n",
" key (str): The name of the sub plug\n",
" \"\"\"\n",
" if not isinstance(key, basestring):\n",
" raise TypeError(\n",
" 'Only strings are allowed as sub-plug keys! '\n",
" 'This is due to the fact that JSON serialization only allows '\n",
" 'strings as keys.')\n",
" if not self._sub_plugs.get(key):\n",
" self._sub_plugs[key] = SubOutputPlug(\n",
" key=key,\n",
" node=self.node,\n",
" parent_plug=self)\n",
" return self._sub_plugs[key]\n",
"\n",
" def _update_value(self, value):\n",
" \"\"\"Propagate the dirty state to all connected Plugs as well.\"\"\"\n",
" super(OutputPlug, self)._update_value(value)\n",
" for plug in self.connections:\n",
" plug.value = value\n",
"\n",
" def serialize(self):\n",
" \"\"\"Serialize the Plug containing all it's connections.\"\"\"\n",
" connections = {}\n",
" for connection in self.connections:\n",
" connections.setdefault(connection.node.identifier, [])\n",
" connections[connection.node.identifier].append(connection.name)\n",
" return {\n",
" 'name': self.name,\n",
" 'value': self.value if not self._sub_plugs else None,\n",
" 'connections': connections,\n",
" 'sub_plugs': {\n",
" name: sub_plug.serialize()\n",
" for name, sub_plug in self._sub_plugs.items()\n",
" }\n",
" }\n",
"\n",
"\n",
"class InputPlug(IPlug):\n",
" \"\"\"Receives data from an OutputPlug.\"\"\"\n",
"\n",
" def __init__(self, name, node, value=None):\n",
" \"\"\"Initialize the InputPlug.\n",
"\n",
" Can be connected to an OutputPlug.\n",
" Args:\n",
" name (str): The name of the Plug.\n",
" node (INode): The Node holding the Plug.\n",
" \"\"\"\n",
" self.accepted_plugs = (OutputPlug,)\n",
"\n",
" super(InputPlug, self).__init__(name, node)\n",
" self.value = value\n",
" self.is_dirty = True\n",
" if not isinstance(self, SubPlug):\n",
" self.node.inputs[self.name] = self\n",
"\n",
" def connect(self, plug):\n",
" \"\"\"Connect this Plug to the given OutputPlug.\n",
"\n",
" Set both participating Plugs dirty.\n",
" \"\"\"\n",
" if not isinstance(plug, self.accepted_plugs):\n",
" raise TypeError(\"Cannot connect {0} to {1}\".format(\n",
" type(self), type(plug)))\n",
" plug.connect(self)\n",
"\n",
" def __getitem__(self, key):\n",
" \"\"\"Retrieve a sub plug by key.\n",
"\n",
" If it does not exist yet, it is created automatically!\n",
" Args:\n",
" key (str): The name of the sub plug\n",
" \"\"\"\n",
" if not isinstance(key, basestring):\n",
" raise TypeError(\n",
" 'Only strings are allowed as sub-plug keys! '\n",
" 'This is due to the fact that JSON serialization only allows '\n",
" 'strings as keys.')\n",
" if not self._sub_plugs.get(key):\n",
" self._sub_plugs[key] = SubInputPlug(\n",
" key=key,\n",
" node=self.node,\n",
" parent_plug=self)\n",
" return self._sub_plugs[key]\n",
"\n",
" def _update_value(self, value):\n",
" if self._sub_plugs:\n",
" return\n",
" super(InputPlug, self)._update_value(value)\n",
"\n",
" def serialize(self):\n",
" \"\"\"Serialize the Plug containing all it's connections.\"\"\"\n",
" connections = {}\n",
" if self.connections:\n",
" connections[self.connections[0].node.identifier] = self.connections[0].name\n",
" return {\n",
" 'name': self.name,\n",
" 'value': self.value if not self._sub_plugs else None,\n",
" 'connections': connections,\n",
" 'sub_plugs': {\n",
" name: sub_plug.serialize()\n",
" for name, sub_plug in self._sub_plugs.items()\n",
" }\n",
" }\n",
"\n",
"\n",
"class SubPlug(object):\n",
" \"\"\"Mixin that unifies common properties of subplugs.\"\"\"\n",
"\n",
" @property\n",
" def is_dirty(self):\n",
" \"\"\"Access to the dirty status on this Plug.\"\"\"\n",
" return self._is_dirty\n",
"\n",
" @is_dirty.setter\n",
" def is_dirty(self, status):\n",
" \"\"\"Setting the Plug dirty informs its parent plug.\"\"\"\n",
" self._is_dirty = status\n",
" if status:\n",
" self.parent_plug.is_dirty = status\n",
"\n",
" def promote_to_graph(self, name=None):\n",
" \"\"\"Add this plug to the graph of this plug's node.\n",
"\n",
" NOTE: Subplugs can only be added to a graph via their parent plug.\n",
"\n",
" Args:\n",
" name (str): Optionally provide a different name for the Plug\n",
" \"\"\"\n",
" # prevent adding SubPlug to the graph witout their parents\n",
" raise TypeError(\n",
" \"Cannot add SubPlug to graph! Add the parent plug instead.\")\n",
"\n",
"\n",
"class SubInputPlug(SubPlug, InputPlug):\n",
" \"\"\"Held by a parent input plug to form a compound plug.\"\"\"\n",
"\n",
" def __init__(self, key, node, parent_plug, value=None):\n",
" \"\"\"Initialize the plug.\n",
"\n",
" Can be connected to an OutputPlug.\n",
" Args:\n",
" key (str): The key will be used to form the name of the Plug:\n",
" {parent_plug.name}.{key}.\n",
" node (INode): The Node holding the Plug.\n",
" parent_plug (InputPlug): The parent plug holding this Plug.\n",
" \"\"\"\n",
" # super().__init__() refers to self.parent_plug, so need to set it here\n",
" self.key = key\n",
" self.parent_plug = parent_plug\n",
" self.parent_plug._sub_plugs[key] = self\n",
"\n",
" super(SubInputPlug, self).__init__(\n",
" '{0}.{1}'.format(parent_plug.name, key), node)\n",
" self.value = value\n",
" self.is_dirty = True\n",
"\n",
" def serialize(self):\n",
" \"\"\"Serialize the Plug containing all it's connections.\"\"\"\n",
" connections = {}\n",
" if self.connections:\n",
" connections[self.connections[0].node.identifier] = \\\n",
" self.connections[0].name\n",
" return {\n",
" 'name': self.name,\n",
" 'value': self.value,\n",
" 'connections': connections\n",
" }\n",
"\n",
"\n",
"class SubOutputPlug(SubPlug, OutputPlug):\n",
" \"\"\"Held by a parent output plug to form a compound plug.\"\"\"\n",
"\n",
" def __init__(self, key, node, parent_plug, value=None):\n",
" \"\"\"Initialize the plug.\n",
"\n",
" Can be connected to an InputPlug.\n",
" Args:\n",
" key (str): The key will be used to form the name of the Plug:\n",
" {parent_plug.name}.{key}.\n",
" node (INode): The Node holding the Plug.\n",
" parent_plug (InputPlug): The parent plug holding this Plug.\n",
" \"\"\"\n",
" # super().__init__() refers to self.parent_plug, so need to set it here\n",
" self.key = key\n",
" self.parent_plug = parent_plug\n",
" self.parent_plug._sub_plugs[key] = self\n",
"\n",
" super(SubOutputPlug, self).__init__(\n",
" '{0}.{1}'.format(parent_plug.name, key), node)\n",
" self.value = value\n",
" self.is_dirty = True\n",
"\n",
" def _update_value(self, value):\n",
" \"\"\"Propagate the dirty state to all connected Plugs as well.\"\"\"\n",
" super(SubOutputPlug, self)._update_value(value)\n",
" for plug in self.connections:\n",
" plug.value = value\n",
" parent_value = self.parent_plug.value or {}\n",
" parent_value[self.key] = value\n",
" self.parent_plug.value = parent_value\n",
"\n",
" def serialize(self):\n",
" \"\"\"Serialize the Plug containing all it's connections.\"\"\"\n",
" connections = {}\n",
" for connection in self.connections:\n",
" connections.setdefault(connection.node.identifier, [])\n",
" connections[connection.node.identifier].append(connection.name)\n",
" return {\n",
" 'name': self.name,\n",
" 'value': self.value,\n",
" 'connections': connections\n",
" }\n",
"\n",
"\n",
"class InputPlugGroup(object):\n",
" \"\"\"Group plugs inside a group into one entry point on the graph.\"\"\"\n",
"\n",
" def __init__(self, name, graph, plugs=None):\n",
" \"\"\"Initialize the group and assigning it to the `Graph.input_groups`.\n",
"\n",
" Can be connected to an OutputPlug.\n",
" Args:\n",
" name (str): The name of the InputPlugGroup.\n",
" graph (Graph): The Graph holding the PlugGroup.\n",
" plugs (list of InputPlug): The plugs in this group.\n",
" \"\"\"\n",
" self.name = name\n",
" self.graph = graph\n",
" self.plugs = plugs or []\n",
" self.graph.inputs[self.name] = self\n",
"\n",
" def connect(self, plug):\n",
" \"\"\"Connect all plugs in this group to the given plug.\"\"\"\n",
" for input_plug in self.plugs:\n",
" plug.connect(input_plug)\n",
"\n",
" def disconnect(self, plug):\n",
" \"\"\"Disconnect all plugs in this group from the given plug.\"\"\"\n",
" for input_plug in self.plugs:\n",
" plug.disconnect(input_plug)\n",
"\n",
" def __iter__(self):\n",
" \"\"\"Convenience to iterate over the plugs in this group.\"\"\"\n",
" for plug in self.plugs:\n",
" yield plug\n",
"\n",
" def __rshift__(self, other):\n",
" \"\"\"Syntactic sugar for the connect() method.\"\"\"\n",
" self.connect(other)\n",
"\n",
" def __lshift__(self, other):\n",
" \"\"\"Syntactic sugar for the disconnect() method.\"\"\"\n",
" self.disconnect(other)\n",
"\n",
" @property\n",
" def value(self):\n",
" \"\"\"Getting the value of an InputPlugGroup is not supported.\n",
"\n",
" The value property is implemented nonetheless, in order to allow for\n",
" convenient setting of the value of all plugs in the InputPlugGroup.\n",
" \"\"\"\n",
" raise AttributeError(\n",
" \"Getting the value of an InputPlugGroup is not supported\")\n",
"\n",
" @value.setter\n",
" def value(self, new_value):\n",
" \"\"\"Set the value for all grouped plugs.\"\"\"\n",
" for plug in self.plugs:\n",
" plug.value = new_value\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 459 | 0.000025 |
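OutputPlug.connect() above does two-sided bookkeeping: it records the link on both plugs, copies the value across, and marks the input dirty so its owning node knows to recompute. Below is a stripped-down, dependency-free sketch of that handshake; FakeNode, MiniPlug and the module-level connect() helper are invented names for illustration and are not part of the module above.

class FakeNode(object):
    """Toy node that just counts dirty notifications."""

    def __init__(self, name):
        self.name = name
        self.dirty_events = 0

    def on_input_plug_set_dirty(self):
        self.dirty_events += 1

class MiniPlug(object):
    """Minimal plug: a name, an owning node, a value and its connections."""

    def __init__(self, name, node):
        self.name = name
        self.node = node
        self.connections = []
        self.value = None

def connect(out_plug, in_plug):
    """Mirror the two-sided bookkeeping done by OutputPlug.connect()."""
    if in_plug not in out_plug.connections:
        out_plug.connections.append(in_plug)
    if out_plug not in in_plug.connections:
        in_plug.connections = [out_plug]  # an input accepts a single upstream plug
    in_plug.value = out_plug.value
    in_plug.node.on_input_plug_set_dirty()

a, b = FakeNode('a'), FakeNode('b')
source, target = MiniPlug('out', a), MiniPlug('in', b)
source.value = 42
connect(source, target)
print(target.value, b.dirty_events)  # 42 1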
"""
AWS Lambda execution environment wrapper
@author: David Moss
@copyright: 2017 People Power Company. All rights reserved.
@contact: [email protected]
"""
import botengine
import importlib
import time
def lambda_handler(data, context):
"""
Execution wrapper on AWS Lambda
:param data: Inputs to the botengine
:param context: Ignored
:return: JSON structure with errors and debug information
"""
if data is None:
return 0
logger = LambdaLogger()
try:
bot = importlib.import_module('bot')
botengine._run(bot, data, logger, context)
except:
import traceback
import sys
(t, v, tb) = sys.exc_info()
logger.tracebacks = traceback.format_exception(t, v, tb)
# Check for asynchronous data request triggers which handle errors differently than synchronous executions of the bot.
if 'inputs' in data:
for i in data['inputs']:
if i['trigger'] == 2048:
import sys
if len(logger.logs) > 0:
sys.stdout.write("logs: ")
for log in logger.logs:
sys.stdout.write(log + "; ")
if len(logger.tracebacks) > 0:
sys.stdout.write("tracebacks: ")
for tb in logger.tracebacks:
sys.stdout.write(tb + "; ")
sys.stdout.flush()
break
if 'sqsQueue' in data:
import json
send_sqs_message(data.get('sqsQueue'), json.dumps(logger.get_lambda_return(data)), data.get('clientContext'))
return logger.get_lambda_return(data)
def send_sqs_message(queue_name, msg_body, client_context):
"""
Method to deliver back to the server the logs and tracebacks during asynchronous parallel processed machine learning data request triggers
:param queue_name:
:param msg_body:
:param client_context:
:return:
"""
import boto3
sqs = boto3.resource('sqs')
queue = sqs.get_queue_by_name(QueueName=queue_name)
queue.send_message(MessageBody=msg_body, MessageAttributes={
'ClientContext': {
'StringValue': client_context,
'DataType': 'String'
}
})
class LambdaLogger():
def __init__(self):
# Tracebacks for crashes
self.tracebacks = []
# Logs
self.logs = []
# Start Code - provided by the server in response to the Start API
self.start_code = 0
def log(self, level, message):
pass
def debug(self, message):
pass
def info(self, message):
pass
def warning(self, message):
self.logs.append("{}: [{}] {}".format(time.time(), "WARNING", message))
def warn(self, message):
self.logs.append("{}: [{}] {}".format(time.time(), "WARNING", message))
def error(self, message):
self.logs.append("{}: [{}] {}".format(time.time(), "ERROR", message))
def critical(self, message):
self.logs.append("{}: [{}] {}".format(time.time(), "CRITICAL", message))
def exception(self, message):
self.logs.append("{}: [{}] {}".format(time.time(), "EXCEPTION", message))
def get_lambda_return(self, data):
"""
:param data: Raw JSON data input to the botengine
:return: JSON dictionary of execution details, only if we have info to share
"""
response = {}
if len(self.tracebacks):
response['tracebacks'] = self.tracebacks
if len(self.logs) > 0:
self.logs.append(self._form_admin_url(data))
if len(self.logs):
response['logs'] = self.logs
response['startCode'] = self.start_code
return response
def _form_admin_url(self, data):
"""
Form a URL that an administrator can click on
:param data: Raw JSON data input to the botengine
:return: Formatted URL
"""
if 'apiHost' not in data:
return "<No apiHost>"
if 'inputs' not in data:
return "<No inputs>"
base_url = "https://maestro.peoplepowerco.com"
# Add specific command center URLs here
try:
# This bundle.py is generated automatically by the botengine CLI when we create or upload a bot.
# It includes a variable called CLOUD_ADDRESS that describes what cloud we uploaded the bot to.
import bundle
if 'sbox' in bundle.CLOUD_ADDRESS:
base_url = "https://cc.presencepro.com"
location_id = "NoLocationId"
for i in data['inputs']:
if 'locationId' in i:
location_id = i['locationId']
return "{}/#!/main/locations/edit/{}".format(base_url, location_id)
except:
return "<Error importing auto-generated bundle.py>"
| [
"\"\"\"\n",
"AWS Lambda execution environment wrapper\n",
"\n",
"@author: David Moss\n",
"\n",
"@copyright: 2017 People Power Company. All rights reserved.\n",
"\n",
"@contact: [email protected]\n",
"\"\"\"\n",
"\n",
"import botengine\n",
"import importlib\n",
"import time\n",
"\n",
"\n",
"def lambda_handler(data, context):\n",
" \"\"\"\n",
" Execution wrapper on AWS Lambda\n",
" :param data: Inputs to the botengine\n",
" :param context: Ignored\n",
" :return: JSON structure with errors and debug information\n",
" \"\"\"\n",
" if data is None:\n",
" return 0\n",
" \n",
" logger = LambdaLogger()\n",
" \n",
" try:\n",
" bot = importlib.import_module('bot')\n",
" botengine._run(bot, data, logger, context)\n",
" \n",
" except:\n",
" import traceback\n",
" import sys\n",
" (t, v, tb) = sys.exc_info()\n",
" logger.tracebacks = traceback.format_exception(t, v, tb)\n",
"\n",
" # Check for asynchronous data request triggers which handle errors differently than synchronous executions of the bot.\n",
" if 'inputs' in data:\n",
" for i in data['inputs']:\n",
" if i['trigger'] == 2048:\n",
" import sys\n",
"\n",
" if len(logger.logs) > 0:\n",
" sys.stdout.write(\"logs: \")\n",
" for log in logger.logs:\n",
" sys.stdout.write(log + \"; \")\n",
"\n",
" if len(logger.tracebacks) > 0:\n",
" sys.stdout.write(\"tracebacks: \")\n",
" for tb in logger.tracebacks:\n",
" sys.stdout.write(tb + \"; \")\n",
"\n",
" sys.stdout.flush()\n",
" break\n",
"\n",
" if 'sqsQueue' in data:\n",
" import json\n",
" send_sqs_message(data.get('sqsQueue'), json.dumps(logger.get_lambda_return(data)), data.get('clientContext'))\n",
"\n",
" return logger.get_lambda_return(data)\n",
"\n",
"\n",
"def send_sqs_message(queue_name, msg_body, client_context):\n",
" \"\"\"\n",
" Method to deliver back to the server the logs and tracebacks during asynchronous parallel processed machine learning data request triggers\n",
" :param queue_name:\n",
" :param msg_body:\n",
" :param client_context:\n",
" :return:\n",
" \"\"\"\n",
" import boto3\n",
" sqs = boto3.resource('sqs')\n",
" queue = sqs.get_queue_by_name(QueueName=queue_name)\n",
" queue.send_message(MessageBody=msg_body, MessageAttributes={\n",
" 'ClientContext': {\n",
" 'StringValue': client_context,\n",
" 'DataType': 'String'\n",
" }\n",
" })\n",
"\n",
"\n",
"class LambdaLogger():\n",
" \n",
" def __init__(self):\n",
" # Tracebacks for crashes\n",
" self.tracebacks = []\n",
"\n",
" # Logs\n",
" self.logs = []\n",
"\n",
" # Start Code - provided by the server in response to the Start API\n",
" self.start_code = 0\n",
"\n",
" def log(self, level, message):\n",
" pass\n",
"\n",
" def debug(self, message):\n",
" pass\n",
"\n",
" def info(self, message):\n",
" pass\n",
"\n",
" def warning(self, message):\n",
" self.logs.append(\"{}: [{}] {}\".format(time.time(), \"WARNING\", message))\n",
"\n",
" def warn(self, message):\n",
" self.logs.append(\"{}: [{}] {}\".format(time.time(), \"WARNING\", message))\n",
"\n",
" def error(self, message):\n",
" self.logs.append(\"{}: [{}] {}\".format(time.time(), \"ERROR\", message))\n",
"\n",
" def critical(self, message):\n",
" self.logs.append(\"{}: [{}] {}\".format(time.time(), \"CRITICAL\", message))\n",
"\n",
" def exception(self, message):\n",
" self.logs.append(\"{}: [{}] {}\".format(time.time(), \"EXCEPTION\", message))\n",
"\n",
" def get_lambda_return(self, data):\n",
" \"\"\"\n",
" :param data: Raw JSON data input to the botengine\n",
" :return: JSON dictionary of execution details, only if we have info to share\n",
" \"\"\"\n",
" response = {}\n",
" \n",
" if len(self.tracebacks):\n",
" response['tracebacks'] = self.tracebacks\n",
"\n",
" if len(self.logs) > 0:\n",
" self.logs.append(self._form_admin_url(data))\n",
"\n",
" if len(self.logs):\n",
" response['logs'] = self.logs\n",
"\n",
" response['startCode'] = self.start_code\n",
" \n",
" return response\n",
"\n",
" def _form_admin_url(self, data):\n",
" \"\"\"\n",
" Form a URL that an administrator can click on\n",
" :param data: Raw JSON data input to the botengine\n",
" :return: Formatted URL\n",
" \"\"\"\n",
" if 'apiHost' not in data:\n",
" return \"<No apiHost>\"\n",
"\n",
" if 'inputs' not in data:\n",
" return \"<No inputs>\"\n",
"\n",
" base_url = \"https://maestro.peoplepowerco.com\"\n",
"\n",
" # Add specific command center URLs here\n",
" try:\n",
" # This bundle.py is generated automatically by the botengine CLI when we create or upload a bot.\n",
" # It includes a variable called CLOUD_ADDRESS that describes what cloud we uploaded the bot to.\n",
" import bundle\n",
" if 'sbox' in bundle.CLOUD_ADDRESS:\n",
" base_url = \"https://cc.presencepro.com\"\n",
"\n",
" location_id = \"NoLocationId\"\n",
" for i in data['inputs']:\n",
" if 'locationId' in i:\n",
" location_id = i['locationId']\n",
"\n",
" return \"{}/#!/main/locations/edit/{}\".format(base_url, location_id)\n",
"\n",
" except:\n",
" return \"<Error importing auto-generated bundle.py>\"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0.2,
0,
0,
0,
0.1111111111111111,
0.08333333333333333,
0,
0,
0,
0,
0,
0.008130081300813009,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0.006993006993006993,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0
] | 169 | 0.006849 |
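The LambdaLogger above only reports what it has: tracebacks when the bot crashed, logs when something at warning level or above was recorded, and always the start code. A usage sketch follows; it assumes the module above is saved as lambda_function.py and that the botengine package it imports is available, both of which are assumptions rather than facts from the source.

import lambda_function  # assumed file name for the wrapper module above

logger = lambda_function.LambdaLogger()
logger.warn("battery level low")
logger.error("could not reach device")

# An empty event has no 'apiHost' or 'inputs', so the admin URL falls back to a placeholder.
result = logger.get_lambda_return({})

print(result['startCode'])     # 0
print(result['logs'])          # timestamped WARNING/ERROR lines plus the admin URL fallback
print('tracebacks' in result)  # False, nothing crashed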
"""The tests for the Command line Binary sensor platform."""
import unittest
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.components.binary_sensor import command_line
from homeassistant import bootstrap
from homeassistant.helpers import template
from tests.common import get_test_home_assistant
class TestCommandSensorBinarySensor(unittest.TestCase):
"""Test the Command line Binary sensor."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup(self):
"""Test sensor setup."""
config = {'name': 'Test',
'command': 'echo 1',
'payload_on': '1',
'payload_off': '0'}
devices = []
def add_dev_callback(devs):
"""Add callback to add devices."""
for dev in devs:
devices.append(dev)
command_line.setup_platform(self.hass, config, add_dev_callback)
self.assertEqual(1, len(devices))
entity = devices[0]
self.assertEqual('Test', entity.name)
self.assertEqual(STATE_ON, entity.state)
def test_setup_bad_config(self):
"""Test the setup with a bad configuration."""
config = {'name': 'test',
'platform': 'not_command_line',
}
self.assertFalse(bootstrap.setup_component(self.hass, 'test', {
'command_line': config,
}))
def test_template(self):
"""Test setting the state with a template."""
data = command_line.CommandSensorData('echo 10')
entity = command_line.CommandBinarySensor(
self.hass, data, 'test', None, '1.0', '0',
template.Template('{{ value | multiply(0.1) }}', self.hass))
self.assertEqual(STATE_ON, entity.state)
def test_sensor_off(self):
"""Test setting the state without a template."""
data = command_line.CommandSensorData('echo 0')
entity = command_line.CommandBinarySensor(
self.hass, data, 'test', None, '1', '0', None)
self.assertEqual(STATE_OFF, entity.state)
| [
"\"\"\"The tests for the Command line Binary sensor platform.\"\"\"\n",
"import unittest\n",
"\n",
"from homeassistant.const import (STATE_ON, STATE_OFF)\n",
"from homeassistant.components.binary_sensor import command_line\n",
"from homeassistant import bootstrap\n",
"from homeassistant.helpers import template\n",
"\n",
"from tests.common import get_test_home_assistant\n",
"\n",
"\n",
"class TestCommandSensorBinarySensor(unittest.TestCase):\n",
" \"\"\"Test the Command line Binary sensor.\"\"\"\n",
"\n",
" def setUp(self):\n",
" \"\"\"Setup things to be run when tests are started.\"\"\"\n",
" self.hass = get_test_home_assistant()\n",
"\n",
" def tearDown(self):\n",
" \"\"\"Stop everything that was started.\"\"\"\n",
" self.hass.stop()\n",
"\n",
" def test_setup(self):\n",
" \"\"\"Test sensor setup.\"\"\"\n",
" config = {'name': 'Test',\n",
" 'command': 'echo 1',\n",
" 'payload_on': '1',\n",
" 'payload_off': '0'}\n",
"\n",
" devices = []\n",
"\n",
" def add_dev_callback(devs):\n",
" \"\"\"Add callback to add devices.\"\"\"\n",
" for dev in devs:\n",
" devices.append(dev)\n",
"\n",
" command_line.setup_platform(self.hass, config, add_dev_callback)\n",
"\n",
" self.assertEqual(1, len(devices))\n",
" entity = devices[0]\n",
" self.assertEqual('Test', entity.name)\n",
" self.assertEqual(STATE_ON, entity.state)\n",
"\n",
" def test_setup_bad_config(self):\n",
" \"\"\"Test the setup with a bad configuration.\"\"\"\n",
" config = {'name': 'test',\n",
" 'platform': 'not_command_line',\n",
" }\n",
"\n",
" self.assertFalse(bootstrap.setup_component(self.hass, 'test', {\n",
" 'command_line': config,\n",
" }))\n",
"\n",
" def test_template(self):\n",
" \"\"\"Test setting the state with a template.\"\"\"\n",
" data = command_line.CommandSensorData('echo 10')\n",
"\n",
" entity = command_line.CommandBinarySensor(\n",
" self.hass, data, 'test', None, '1.0', '0',\n",
" template.Template('{{ value | multiply(0.1) }}', self.hass))\n",
"\n",
" self.assertEqual(STATE_ON, entity.state)\n",
"\n",
" def test_sensor_off(self):\n",
" \"\"\"Test setting the state with a template.\"\"\"\n",
" data = command_line.CommandSensorData('echo 0')\n",
"\n",
" entity = command_line.CommandBinarySensor(\n",
" self.hass, data, 'test', None, '1', '0', None)\n",
"\n",
" self.assertEqual(STATE_OFF, entity.state)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 71 | 0 |
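The template test above passes because 'echo 10' prints "10", the value_template multiplies it by 0.1, and the rendered "1.0" equals the payload_on the sensor was constructed with. Below is a dependency-free sketch of that comparison; render_template is a stand-in for Home Assistant's Jinja template engine, not the real API.

import subprocess

def render_template(raw_value, factor):
    """Stand-in for '{{ value | multiply(0.1) }}': multiply and format the result."""
    return str(float(raw_value) * factor)

payload_on, payload_off = '1.0', '0'

raw = subprocess.check_output(['echo', '10']).strip().decode('utf-8')
rendered = render_template(raw, 0.1)

state = 'on' if rendered == payload_on else 'off' if rendered == payload_off else 'unknown'
print(raw, rendered, state)  # 10 1.0 on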
#!/usr/bin/env python
from __future__ import unicode_literals
import collections
import datetime
import functools
import logging
import os
import re
import struct
import sys
import tornado.autoreload
import tornado.ioloop
import tornado.options
import tornado.tcpserver
import tornado.stack_context
from tornado.util import bytes_type
import torncached.errors
import torncached.options
import torncached.storage
class MemcacheServer(tornado.tcpserver.TCPServer):
def __init__(self, *args, **kwargs):
super(MemcacheServer, self).__init__()
self._storage = torncached.storage.MemcacheStorage()
def handle_stream(self, stream, address):
MemcacheConnection(stream, address, self._storage)
class MemcacheConnection(object):
def __init__(self, stream, address, storage):
stream.read_bytes(1, functools.partial(self.detect_protocol, stream, address, storage))
def detect_protocol(self, stream, address, storage, buf):
try:
self._protocol = MemcacheBinaryProtocolHandler(stream, address, storage, buf)
except torncached.errors.ProtocolError:
self._protocol = MemcacheAsciiProtocolHandler(stream, address, storage, buf)
class MemcacheProtocolHandler(object):
def __init__(self, stream, address, storage):
self.stream = stream
self.address = address
self.storage = storage
class MemcacheAsciiProtocolHandler(MemcacheProtocolHandler):
def __init__(self, stream, address, storage, buf=None):
super(MemcacheAsciiProtocolHandler, self).__init__(stream, address, storage)
self._request_finished = False
self._header_callback = tornado.stack_context.wrap(self._on_headers)
self._write_callback = None
logging.info("%d: Client using the ascii protocol" % (stream.fileno()))
self.read_next_command(buf)
def close(self):
logging.info("<%d connection closed." % (self.stream.fileno()))
self.stream.close()
def write(self, chunk, callback=None):
assert self._request, "Request closed"
if not self.stream.closed():
try:
logging.debug(">%d %s" % (self.stream.fileno(), chunk.rstrip().decode("utf-8")))
except UnicodeDecodeError:
logging.info(">%d sending %d bytes" % (self.stream.fileno(), len(chunk)))
self._write_callback = tornado.stack_context.wrap(callback)
self.stream.write(chunk, self._on_write_complete)
def finish(self):
assert self._request, "Request closed"
self._request_finished = True
if not self.stream.writing():
self._finish_request()
def _on_write_complete(self):
if self._write_callback is not None:
callback = self._write_callback
self._write_callback = None
callback()
if self._request_finished and not self.stream.writing():
self._finish_request()
def _finish_request(self):
self._request = None
self._request_finished = False
self.close()
STORAGE_COMMANDS = re.compile(r'^([a-z]+) +(\S+) +(\d+) +(\d+) +(\d+)(?: +(noreply))?$')
RETRIEVAL_COMMANDS = re.compile(r'^([a-z]+)(?: +(.*))?$')
def _on_headers(self, data):
data = data.rstrip().decode("utf-8")
logging.info("<%d %s" % (self.stream.fileno(), data))
s = self.STORAGE_COMMANDS.match(data)
if s is not None:
command, key, flags, exptime, _bytes, noreply = s.groups()
self._request = MemcacheAsciiCommand(command, key,
flags=0 if flags is None else int(flags),
exptime=0 if exptime is None else int(exptime),
noreply=noreply=="noreply")
content_length = 0 if _bytes is None else int(_bytes)
if 0 < content_length:
self.stream.read_bytes(content_length, self._on_request_body)
else:
self.write(b"ERROR\r\n")
else:
r = self.RETRIEVAL_COMMANDS.match(data)
if r is not None:
command, key = r.groups()
self._request = MemcacheAsciiCommand(command, "" if key is None else key)
self.request_callback(self._request)
else:
self._request = MemcacheAsciiCommand("", "")
self.write(b"ERROR\r\n")
self.read_next_command()
def _on_request_body(self, data):
def __on_request_body(newline):
self._request.body = data
self.request_callback(self._request)
self.stream.read_until_regex(b"\r?\n", __on_request_body) # skip trailing newline
def request_callback(self, request):
command = "on_%s_command" % request.command
if hasattr(self, command):
getattr(self, command)(request)
else:
self.write(b"ERROR\r\n")
self.read_next_command()
def read_next_command(self, buf=None):
def prepend_buffer(data):
if buf is not None:
data = buf + data
self._header_callback(data)
def read_command():
self._request = None
self.stream.read_until_regex(b"\r?\n", prepend_buffer)
if 0.0 < tornado.options.options.slowdown:
timedelta = datetime.timedelta(seconds=tornado.options.options.slowdown)
self.stream.io_loop.add_timeout(timedelta, read_command)
else:
read_command()
## Storage commands
def on_set_command(self, request):
if not request.noreply:
if self.storage.set(request.key, request.body, request.flags, request.exptime):
self.write(b"STORED\r\n")
self.read_next_command()
def on_add_command(self, request):
if not request.noreply:
if self.storage.add(request.key, request.body, request.flags, request.exptime):
self.write(b"STORED\r\n")
else:
self.write(b"NOT_STORED\r\n")
self.read_next_command()
def on_replace_command(self, request):
if not request.noreply:
if self.storage.replace(request.key, request.body, request.flags, request.exptime):
self.write(b"STORED\r\n")
else:
self.write(b"NOT_STORED\r\n")
self.read_next_command()
def on_append_command(self, request):
if not request.noreply:
if self.storage.append(request.key, request.body, request.flags, request.exptime):
self.write(b"STORED\r\n")
else:
self.write(b"NOT_STORED\r\n")
self.read_next_command()
def on_prepend_command(self, request):
if not request.noreply:
if self.storage.prepend(request.key, request.body, request.flags, request.exptime):
self.write(b"STORED\r\n")
else:
self.write(b"NOT_STORED\r\n")
self.read_next_command()
## Retrieval commands
def on_get_command(self, request):
# text protocol allows multiple get
for key in re.split(r' +', request.key):
body, flags = self.storage.get(key)
if body is not None:
self.write(("VALUE %s %d %d\r\n" % (key, flags, len(body))).encode("utf-8"))
self.write(body + b"\r\n")
self.write(b"END\r\n")
self.read_next_command()
def on_delete_command(self, request):
if not request.noreply:
if self.storage.delete(request.key):
self.write(b"DELETED\r\n")
else:
self.write(b"NOT_FOUND\r\n")
self.read_next_command()
def on_touch_command(self, request):
if not request.noreply:
if self.storage.touch(request.key):
self.write(b"TOUCHED\r\n")
else:
self.write(b"NOT_FOUND\r\n")
self.read_next_command()
## other commands
def on_quit_command(self, request):
self.finish()
def on_stats_command(self, request):
for (key, val) in sorted(self.storage.stats().items()):
self.write(("STAT %s %s\r\n" % (key, str(val))).encode("utf-8"))
self.write(b"END\r\n")
self.read_next_command()
def on_version_command(self, request):
self.write(("VERSION %s\r\n" % self.storage.version()).encode("utf-8"))
self.read_next_command()
class MemcacheBinaryProtocolHandler(MemcacheProtocolHandler):
def __init__(self, stream, address, storage, buf=None):
if buf is not None:
magic = struct.unpack(b"B", buf[0:1])
if magic != 0x80:
raise torncached.errors.ProtocolError("not binary protocol")
super(MemcacheBinaryProtocolHandler, self).__init__(stream, address, storage)
raise torncached.errors.ProtocolError("not implemented")
class MemcacheCommand(object):
pass
class MemcacheAsciiCommand(MemcacheCommand):
def __init__(self, command, key, flags=None, exptime=None, noreply=False, body=None):
super(MemcacheAsciiCommand, self).__init__()
self.command = command
self.key = key
self.flags = 0 if flags is None else flags
self.exptime = 0 if exptime is None else exptime
self.noreply = not not noreply
if isinstance(body, str):
self.body = body.encode("utf-8")
else:
self.body = body or b""
class MemcacheBinaryCommand(MemcacheCommand):
def __init__(self, *args, **kwargs):
super(MemcacheBinaryCommand, self).__init__()
def main():
torncached.options.define_options()
tornado.options.parse_command_line(sys.argv)
server = torncached.server.MemcacheServer()
server.listen(tornado.options.options.port)
logging.info("server listening (%d/tcp)" % tornado.options.options.port)
if tornado.options.options.autoreload:
logging.info("autoreload is enabled")
tornado.autoreload.start()
if tornado.options.options.slowdown:
logging.info("simulate response slowdown of %.1f second(s)" % tornado.options.options.slowdown)
tornado.ioloop.IOLoop.instance().start()
# vim:set ft=python :
| [
"#!/usr/bin/env python\n",
"\n",
"from __future__ import unicode_literals\n",
"import collections\n",
"import datetime\n",
"import functools\n",
"import logging\n",
"import os\n",
"import re\n",
"import struct\n",
"import sys\n",
"import tornado.autoreload\n",
"import tornado.ioloop\n",
"import tornado.options\n",
"import tornado.tcpserver\n",
"import tornado.stack_context\n",
"from tornado.util import bytes_type\n",
"import torncached.errors\n",
"import torncached.options\n",
"import torncached.storage\n",
"\n",
"class MemcacheServer(tornado.tcpserver.TCPServer):\n",
" def __init__(self, *args, **kwargs):\n",
" super(MemcacheServer, self).__init__()\n",
" self._storage = torncached.storage.MemcacheStorage()\n",
"\n",
" def handle_stream(self, stream, address):\n",
" MemcacheConnection(stream, address, self._storage)\n",
"\n",
"class MemcacheConnection(object):\n",
" def __init__(self, stream, address, storage):\n",
" stream.read_bytes(1, functools.partial(self.detect_protocol, stream, address, storage))\n",
"\n",
" def detect_protocol(self, stream, address, storage, buf):\n",
" try:\n",
" self._protocol = MemcacheBinaryProtocolHandler(stream, address, storage, buf)\n",
" except torncached.errors.ProtocolError:\n",
" self._protocol = MemcacheAsciiProtocolHandler(stream, address, storage, buf)\n",
"\n",
"class MemcacheProtocolHandler(object):\n",
" def __init__(self, stream, address, storage):\n",
" self.stream = stream\n",
" self.address = address\n",
" self.storage = storage\n",
"\n",
"class MemcacheAsciiProtocolHandler(MemcacheProtocolHandler):\n",
" def __init__(self, stream, address, storage, buf=None):\n",
" super(MemcacheAsciiProtocolHandler, self).__init__(stream, address, storage)\n",
" self._request_finished = False\n",
" self._header_callback = tornado.stack_context.wrap(self._on_headers)\n",
" self._write_callback = None\n",
" logging.info(\"%d: Client using the ascii protocol\" % (stream.fileno()))\n",
" self.read_next_command(buf)\n",
"\n",
" def close(self):\n",
" logging.info(\"<%d connection closed.\" % (self.stream.fileno()))\n",
" self.stream.close()\n",
"\n",
" def write(self, chunk, callback=None):\n",
" assert self._request, \"Request closed\"\n",
" if not self.stream.closed():\n",
" try:\n",
" logging.debug(\">%d %s\" % (self.stream.fileno(), chunk.rstrip().decode(\"utf-8\")))\n",
" except UnicodeDecodeError:\n",
" logging.info(\">%d sending %d bytes\" % (self.stream.fileno(), len(chunk)))\n",
" self._write_callback = tornado.stack_context.wrap(callback)\n",
" self.stream.write(chunk, self._on_write_complete)\n",
"\n",
" def finish(self):\n",
" assert self._request, \"Request closed\"\n",
" self._request_finished = True\n",
" if not self.stream.writing():\n",
" self._finish_request()\n",
"\n",
" def _on_write_complete(self):\n",
" if self._write_callback is not None:\n",
" callback = self._write_callback\n",
" self._write_callback = None\n",
" callback()\n",
" if self._request_finished and not self.stream.writing():\n",
" self._finish_request()\n",
"\n",
" def _finish_request(self):\n",
" self._request = None\n",
" self._request_finished = False\n",
" self.close()\n",
"\n",
" STORAGE_COMMANDS = re.compile(r'^([a-z]+) +(\\S+) +(\\d+) +(\\d+) +(\\d+)(?: +(noreply))?$')\n",
" RETRIEVAL_COMMANDS = re.compile(r'^([a-z]+)(?: +(.*))?$')\n",
"\n",
" def _on_headers(self, data):\n",
" data = data.rstrip().decode(\"utf-8\")\n",
" logging.info(\"<%d %s\" % (self.stream.fileno(), data))\n",
" s = self.STORAGE_COMMANDS.match(data)\n",
" if s is not None:\n",
" command, key, flags, exptime, _bytes, noreply = s.groups()\n",
" self._request = MemcacheAsciiCommand(command, key,\n",
" flags=0 if flags is None else int(flags),\n",
" exptime=0 if exptime is None else int(exptime),\n",
" noreply=noreply==\"noreply\")\n",
" content_length = 0 if _bytes is None else int(_bytes)\n",
" if 0 < content_length:\n",
" self.stream.read_bytes(content_length, self._on_request_body)\n",
" else:\n",
" self.write(b\"ERROR\\r\\n\")\n",
" else:\n",
" r = self.RETRIEVAL_COMMANDS.match(data)\n",
" if r is not None:\n",
" command, key = r.groups()\n",
" self._request = MemcacheAsciiCommand(command, \"\" if key is None else key)\n",
" self.request_callback(self._request)\n",
" else:\n",
" self._request = MemcacheAsciiCommand(\"\", \"\")\n",
" self.write(b\"ERROR\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" def _on_request_body(self, data):\n",
" def __on_request_body(newline):\n",
" self._request.body = data\n",
" self.request_callback(self._request)\n",
" self.stream.read_until_regex(b\"\\r?\\n\", __on_request_body) # skip trailing newline\n",
"\n",
" def request_callback(self, request):\n",
" command = \"on_%s_command\" % request.command\n",
" if hasattr(self, command):\n",
" getattr(self, command)(request)\n",
" else:\n",
" self.write(b\"ERROR\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" def read_next_command(self, buf=None):\n",
" def prepend_buffer(data):\n",
" if buf is not None:\n",
" data = buf + data\n",
" self._header_callback(data)\n",
"\n",
" def read_command():\n",
" self._request = None\n",
" self.stream.read_until_regex(b\"\\r?\\n\", prepend_buffer)\n",
"\n",
" if 0.0 < tornado.options.options.slowdown:\n",
" timedelta = datetime.timedelta(seconds=tornado.options.options.slowdown)\n",
" self.stream.io_loop.add_timeout(timedelta, read_command)\n",
" else:\n",
" read_command()\n",
"\n",
" ## Storage commands\n",
" def on_set_command(self, request):\n",
" if not request.noreply:\n",
" if self.storage.set(request.key, request.body, request.flags, request.exptime):\n",
" self.write(b\"STORED\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" def on_add_command(self, request):\n",
" if not request.noreply:\n",
" if self.storage.add(request.key, request.body, request.flags, request.exptime):\n",
" self.write(b\"STORED\\r\\n\")\n",
" else:\n",
" self.write(b\"NOT_STORED\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" def on_replace_command(self, request):\n",
" if not request.noreply:\n",
" if self.storage.replace(request.key, request.body, request.flags, request.exptime):\n",
" self.write(b\"STORED\\r\\n\")\n",
" else:\n",
" self.write(b\"NOT_STORED\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" def on_append_command(self, request):\n",
" if not request.noreply:\n",
" if self.storage.append(request.key, request.body, request.flags, request.exptime):\n",
" self.write(b\"STORED\\r\\n\")\n",
" else:\n",
" self.write(b\"NOT_STORED\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" def on_prepend_command(self, request):\n",
" if not request.noreply:\n",
" if self.storage.prepend(request.key, request.body, request.flags, request.exptime):\n",
" self.write(b\"STORED\\r\\n\")\n",
" else:\n",
" self.write(b\"NOT_STORED\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" ## Retrieval commands\n",
" def on_get_command(self, request):\n",
" # text protocol allows multiple get\n",
" for key in re.split(r' +', request.key):\n",
" body, flags = self.storage.get(key)\n",
" if body is not None:\n",
" self.write((\"VALUE %s %d %d\\r\\n\" % (key, flags, len(body))).encode(\"utf-8\"))\n",
" self.write(body + b\"\\r\\n\")\n",
" self.write(b\"END\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" def on_delete_command(self, request):\n",
" if not request.noreply:\n",
" if self.storage.delete(request.key):\n",
" self.write(b\"DELETED\\r\\n\")\n",
" else:\n",
" self.write(b\"NOT_FOUND\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" def on_touch_command(self, request):\n",
" if not request.noreply:\n",
" if self.storage.touch(request.key):\n",
" self.write(b\"TOUCHED\\r\\n\")\n",
" else:\n",
" self.write(b\"NOT_FOUND\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" ## other commands\n",
" def on_quit_command(self, request):\n",
" self.finish()\n",
"\n",
" def on_stats_command(self, request):\n",
" for (key, val) in sorted(self.storage.stats().items()):\n",
" self.write((\"STAT %s %s\\r\\n\" % (key, str(val))).encode(\"utf-8\"))\n",
" self.write(b\"END\\r\\n\")\n",
" self.read_next_command()\n",
"\n",
" def on_version_command(self, request):\n",
" self.write((\"VERSION %s\\r\\n\" % self.storage.version()).encode(\"utf-8\"))\n",
" self.read_next_command()\n",
"\n",
"class MemcacheBinaryProtocolHandler(MemcacheProtocolHandler):\n",
" def __init__(self, stream, address, storage, buf=None):\n",
" if buf is not None:\n",
" magic = struct.unpack(b\"B\", buf[0:1])\n",
" if magic != 0x80:\n",
" raise torncached.errors.ProtocolError(\"not binary protocol\")\n",
"\n",
" super(MemcacheBinaryProtocolHandler, self).__init__(stream, address, storage)\n",
" raise torncached.errors.ProtocolError(\"not implemented\")\n",
"\n",
"class MemcacheCommand(object):\n",
" pass\n",
"\n",
"class MemcacheAsciiCommand(MemcacheCommand):\n",
" def __init__(self, command, key, flags=None, exptime=None, noreply=False, body=None):\n",
" super(MemcacheAsciiCommand, self).__init__()\n",
" self.command = command\n",
" self.key = key\n",
" self.flags = 0 if flags is None else flags\n",
" self.exptime = 0 if exptime is None else exptime\n",
" self.noreply = not not noreply\n",
" if isinstance(body, str):\n",
" self.body = body.encode(\"utf-8\")\n",
" else:\n",
" self.body = body or b\"\"\n",
"\n",
"class MemcacheBinaryCommand(MemcacheCommand):\n",
" def __init__(self, *args, **kwargs):\n",
" super(MemcacheBinaryCommand, self).__init__()\n",
"\n",
"def main():\n",
" torncached.options.define_options()\n",
" tornado.options.parse_command_line(sys.argv)\n",
" server = torncached.server.MemcacheServer()\n",
" server.listen(tornado.options.options.port)\n",
" logging.info(\"server listening (%d/tcp)\" % tornado.options.options.port)\n",
" if tornado.options.options.autoreload:\n",
" logging.info(\"autoreload is enabled\")\n",
" tornado.autoreload.start()\n",
" if tornado.options.options.slowdown:\n",
" logging.info(\"simulate response slowdown of %.1f second(s)\" % tornado.options.options.slowdown)\n",
" tornado.ioloop.IOLoop.instance().start()\n",
"\n",
"# vim:set ft=python :\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0.010416666666666666,
0,
0,
0,
0.011111111111111112,
0,
0.011235955056179775,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0.01639344262295082,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0.014705882352941176,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0.02,
0.03571428571428571,
0.025,
0.022727272727272728,
0.0196078431372549,
0.0625,
0.01694915254237288,
0.02564102564102564,
0.02564102564102564,
0,
0,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0.03225806451612903,
0,
0,
0.022222222222222223,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0,
0,
0
] | 270 | 0.003428 |
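The torncached sample above parses incoming memcached text-protocol command lines with the two regular expressions STORAGE_COMMANDS and RETRIEVAL_COMMANDS. The sketch below is not part of the dataset row; the sample commands are made up and only show what the two patterns capture from a storage and a retrieval line.
import re

STORAGE_COMMANDS = re.compile(r'^([a-z]+) +(\S+) +(\d+) +(\d+) +(\d+)(?: +(noreply))?$')
RETRIEVAL_COMMANDS = re.compile(r'^([a-z]+)(?: +(.*))?$')

# "set <key> <flags> <exptime> <bytes> [noreply]" matches the storage pattern.
print(STORAGE_COMMANDS.match("set foo 0 0 3").groups())
# ('set', 'foo', '0', '0', '3', None)

# "get <key> [<key> ...]" does not match it and falls through to the retrieval
# pattern, which is why on_get_command() later splits the key field on whitespace.
print(RETRIEVAL_COMMANDS.match("get foo bar").groups())
# ('get', 'foo bar')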
import datetime
from django.conf import settings
from django.core import signing
from django.core.mail import send_mail
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.template import loader
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views import generic
from django.views.decorators.debug import sensitive_post_parameters
try:
from django.contrib.sites.shortcuts import get_current_site
except ImportError:
from django.contrib.sites.models import get_current_site
from .forms import PasswordRecoveryForm, PasswordResetForm
from .signals import user_recovers_password
from .utils import get_user_model, get_username
class SaltMixin(object):
salt = 'password_recovery'
url_salt = 'password_recovery_url'
def loads_with_timestamp(value, salt):
"""Returns the unsigned value along with its timestamp, the time when it
got dumped."""
try:
signing.loads(value, salt=salt, max_age=-1)
except signing.SignatureExpired as e:
age = float(str(e).split('Signature age ')[1].split(' >')[0])
timestamp = timezone.now() - datetime.timedelta(seconds=age)
return timestamp, signing.loads(value, salt=salt)
class RecoverDone(SaltMixin, generic.TemplateView):
template_name = 'password_reset/reset_sent.html'
def get_context_data(self, **kwargs):
ctx = super(RecoverDone, self).get_context_data(**kwargs)
try:
ctx['timestamp'], ctx['email'] = loads_with_timestamp(
self.kwargs['signature'], salt=self.url_salt,
)
except signing.BadSignature:
raise Http404
return ctx
recover_done = RecoverDone.as_view()
class Recover(SaltMixin, generic.FormView):
case_sensitive = True
form_class = PasswordRecoveryForm
template_name = 'password_reset/recovery_form.html'
success_url_name = 'password_reset:password_reset_sent'
email_template_name = 'password_reset/recovery_email.txt'
email_subject_template_name = 'password_reset/recovery_email_subject.txt'
search_fields = ['username', 'email']
def get_success_url(self):
return reverse(self.success_url_name, args=[self.mail_signature])
def get_context_data(self, **kwargs):
kwargs['url'] = self.request.get_full_path()
return super(Recover, self).get_context_data(**kwargs)
def get_form_kwargs(self):
kwargs = super(Recover, self).get_form_kwargs()
kwargs.update({
'case_sensitive': self.case_sensitive,
'search_fields': self.search_fields,
})
return kwargs
def get_site(self):
return get_current_site(self.request)
def send_notification(self):
context = {
'site': self.get_site(),
'user': self.user,
'username': get_username(self.user),
'token': signing.dumps(self.user.pk, salt=self.salt),
'secure': self.request.is_secure(),
}
body = loader.render_to_string(self.email_template_name,
context).strip()
subject = loader.render_to_string(self.email_subject_template_name,
context).strip()
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,
[self.user.email])
def form_valid(self, form):
self.user = form.cleaned_data['user']
self.send_notification()
if (
len(self.search_fields) == 1 and
self.search_fields[0] == 'username'
):
# if we only search by username, don't disclose the user email
# since it may now be public information.
email = self.user.username
else:
email = self.user.email
self.mail_signature = signing.dumps(email, salt=self.url_salt)
return super(Recover, self).form_valid(form)
recover = Recover.as_view()
class Reset(SaltMixin, generic.FormView):
form_class = PasswordResetForm
token_expires = 3600 * 48 # Two days
template_name = 'password_reset/reset.html'
success_url = reverse_lazy('password_reset:password_reset_done')
@method_decorator(sensitive_post_parameters('password1', 'password2'))
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
self.user = None
try:
pk = signing.loads(kwargs['token'], max_age=self.token_expires,
salt=self.salt)
except signing.BadSignature:
return self.invalid()
self.user = get_object_or_404(get_user_model(), pk=pk)
return super(Reset, self).dispatch(request, *args, **kwargs)
def invalid(self):
return self.render_to_response(self.get_context_data(invalid=True))
def get_form_kwargs(self):
kwargs = super(Reset, self).get_form_kwargs()
kwargs['user'] = self.user
return kwargs
def get_context_data(self, **kwargs):
ctx = super(Reset, self).get_context_data(**kwargs)
if 'invalid' not in ctx:
ctx.update({
'username': get_username(self.user),
'token': self.kwargs['token'],
})
return ctx
def form_valid(self, form):
form.save()
user_recovers_password.send(
sender=get_user_model(),
user=form.user,
request=self.request
)
return redirect(self.get_success_url())
reset = Reset.as_view()
class ResetDone(generic.TemplateView):
template_name = 'password_reset/recovery_done.html'
reset_done = ResetDone.as_view()
| [
"import datetime\n",
"\n",
"from django.conf import settings\n",
"from django.core import signing\n",
"from django.core.mail import send_mail\n",
"from django.core.urlresolvers import reverse, reverse_lazy\n",
"from django.http import Http404\n",
"from django.shortcuts import get_object_or_404, redirect\n",
"from django.template import loader\n",
"from django.utils import timezone\n",
"from django.utils.decorators import method_decorator\n",
"from django.views import generic\n",
"from django.views.decorators.debug import sensitive_post_parameters\n",
"\n",
"try:\n",
" from django.contrib.sites.shortcuts import get_current_site\n",
"except ImportError:\n",
" from django.contrib.sites.models import get_current_site\n",
"\n",
"from .forms import PasswordRecoveryForm, PasswordResetForm\n",
"from .signals import user_recovers_password\n",
"from .utils import get_user_model, get_username\n",
"\n",
"\n",
"class SaltMixin(object):\n",
" salt = 'password_recovery'\n",
" url_salt = 'password_recovery_url'\n",
"\n",
"\n",
"def loads_with_timestamp(value, salt):\n",
" \"\"\"Returns the unsigned value along with its timestamp, the time when it\n",
" got dumped.\"\"\"\n",
" try:\n",
" signing.loads(value, salt=salt, max_age=-1)\n",
" except signing.SignatureExpired as e:\n",
" age = float(str(e).split('Signature age ')[1].split(' >')[0])\n",
" timestamp = timezone.now() - datetime.timedelta(seconds=age)\n",
" return timestamp, signing.loads(value, salt=salt)\n",
"\n",
"\n",
"class RecoverDone(SaltMixin, generic.TemplateView):\n",
" template_name = 'password_reset/reset_sent.html'\n",
"\n",
" def get_context_data(self, **kwargs):\n",
" ctx = super(RecoverDone, self).get_context_data(**kwargs)\n",
" try:\n",
" ctx['timestamp'], ctx['email'] = loads_with_timestamp(\n",
" self.kwargs['signature'], salt=self.url_salt,\n",
" )\n",
" except signing.BadSignature:\n",
" raise Http404\n",
" return ctx\n",
"recover_done = RecoverDone.as_view()\n",
"\n",
"\n",
"class Recover(SaltMixin, generic.FormView):\n",
" case_sensitive = True\n",
" form_class = PasswordRecoveryForm\n",
" template_name = 'password_reset/recovery_form.html'\n",
" success_url_name = 'password_reset:password_reset_sent'\n",
" email_template_name = 'password_reset/recovery_email.txt'\n",
" email_subject_template_name = 'password_reset/recovery_email_subject.txt'\n",
" search_fields = ['username', 'email']\n",
"\n",
" def get_success_url(self):\n",
" return reverse(self.success_url_name, args=[self.mail_signature])\n",
"\n",
" def get_context_data(self, **kwargs):\n",
" kwargs['url'] = self.request.get_full_path()\n",
" return super(Recover, self).get_context_data(**kwargs)\n",
"\n",
" def get_form_kwargs(self):\n",
" kwargs = super(Recover, self).get_form_kwargs()\n",
" kwargs.update({\n",
" 'case_sensitive': self.case_sensitive,\n",
" 'search_fields': self.search_fields,\n",
" })\n",
" return kwargs\n",
"\n",
" def get_site(self):\n",
" return get_current_site(self.request)\n",
"\n",
" def send_notification(self):\n",
" context = {\n",
" 'site': self.get_site(),\n",
" 'user': self.user,\n",
" 'username': get_username(self.user),\n",
" 'token': signing.dumps(self.user.pk, salt=self.salt),\n",
" 'secure': self.request.is_secure(),\n",
" }\n",
" body = loader.render_to_string(self.email_template_name,\n",
" context).strip()\n",
" subject = loader.render_to_string(self.email_subject_template_name,\n",
" context).strip()\n",
" send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n",
" [self.user.email])\n",
"\n",
" def form_valid(self, form):\n",
" self.user = form.cleaned_data['user']\n",
" self.send_notification()\n",
" if (\n",
" len(self.search_fields) == 1 and\n",
" self.search_fields[0] == 'username'\n",
" ):\n",
" # if we only search by username, don't disclose the user email\n",
" # since it may now be public information.\n",
" email = self.user.username\n",
" else:\n",
" email = self.user.email\n",
" self.mail_signature = signing.dumps(email, salt=self.url_salt)\n",
" return super(Recover, self).form_valid(form)\n",
"recover = Recover.as_view()\n",
"\n",
"\n",
"class Reset(SaltMixin, generic.FormView):\n",
" form_class = PasswordResetForm\n",
" token_expires = 3600 * 48 # Two days\n",
" template_name = 'password_reset/reset.html'\n",
" success_url = reverse_lazy('password_reset:password_reset_done')\n",
"\n",
" @method_decorator(sensitive_post_parameters('password1', 'password2'))\n",
" def dispatch(self, request, *args, **kwargs):\n",
" self.request = request\n",
" self.args = args\n",
" self.kwargs = kwargs\n",
" self.user = None\n",
"\n",
" try:\n",
" pk = signing.loads(kwargs['token'], max_age=self.token_expires,\n",
" salt=self.salt)\n",
" except signing.BadSignature:\n",
" return self.invalid()\n",
"\n",
" self.user = get_object_or_404(get_user_model(), pk=pk)\n",
" return super(Reset, self).dispatch(request, *args, **kwargs)\n",
"\n",
" def invalid(self):\n",
" return self.render_to_response(self.get_context_data(invalid=True))\n",
"\n",
" def get_form_kwargs(self):\n",
" kwargs = super(Reset, self).get_form_kwargs()\n",
" kwargs['user'] = self.user\n",
" return kwargs\n",
"\n",
" def get_context_data(self, **kwargs):\n",
" ctx = super(Reset, self).get_context_data(**kwargs)\n",
" if 'invalid' not in ctx:\n",
" ctx.update({\n",
" 'username': get_username(self.user),\n",
" 'token': self.kwargs['token'],\n",
" })\n",
" return ctx\n",
"\n",
" def form_valid(self, form):\n",
" form.save()\n",
" user_recovers_password.send(\n",
" sender=get_user_model(),\n",
" user=form.user,\n",
" request=self.request\n",
" )\n",
" return redirect(self.get_success_url())\n",
"reset = Reset.as_view()\n",
"\n",
"\n",
"class ResetDone(generic.TemplateView):\n",
" template_name = 'password_reset/recovery_done.html'\n",
"\n",
"\n",
"reset_done = ResetDone.as_view()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0
] | 169 | 0.000618 |
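The Recover and Reset views in the row above exchange tokens built with django.core.signing. Below is a minimal standalone sketch of that round-trip; it is not part of the dataset, and the throwaway SECRET_KEY, the user primary key 42, and the printed messages are assumptions made only for the demo.
from django.conf import settings
from django.core import signing

# Assumption: standalone demo, so configure a throwaway SECRET_KEY first.
settings.configure(SECRET_KEY="not-a-real-secret")

# What Recover.send_notification() puts into the recovery email (pk 42 is made up).
token = signing.dumps(42, salt="password_recovery")

# What Reset.dispatch() does with the token from the URL (48-hour lifetime).
print(signing.loads(token, salt="password_recovery", max_age=3600 * 48))  # -> 42

try:
    signing.loads(token, salt="wrong_salt")
except signing.BadSignature:
    # A tampered or foreign token renders the "invalid" page instead of the form.
    print("BadSignature -> Reset.invalid()")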
from pyramid.decorator import reify
from pyramid.path import DottedNameResolver
from sandglass.time import _
from sandglass.time.filters import QueryFilter
from sandglass.time.filters import QueryFilterError
class CollectionByPrimaryKey(QueryFilter):
"""
Filter query results for a list of IDs.
This filter only works on collections.
"""
applies_to_admin = True
# Supported request methods for this filter
supported_methods = ('GET', )
def __init__(self, model, *args, **kwargs):
super(CollectionByPrimaryKey, self).__init__(*args, **kwargs)
self._model = model
@reify
def model(self):
# When model is not a class resolve it to be a class
resolver = DottedNameResolver()
return resolver.maybe_resolve(self._model)
def applies_to(self, resource):
if resource.request.method not in self.supported_methods:
return False
elif not resource.is_collection_request:
return False
return super(CollectionByPrimaryKey, self).applies_to(resource)
@staticmethod
def validate(values):
# Add a limit for the number of values
max_values = 100
if len(values) > max_values:
msg = _(u"A maximum of {} IDs are allowed per request")
return msg.format(max_values)
# Check each value in list
for value in values:
try:
int(value)
except (ValueError, TypeError):
return _(u"Invalid ID value")
def filter_query(self, query, request, resource):
if 'id' not in request.GET:
return query
id_list = request.GET.getall('id')
message = self.validate(id_list)
if message:
raise QueryFilterError(message)
return query.filter(self.model.id.in_(id_list))
| [
"from pyramid.decorator import reify\n",
"from pyramid.path import DottedNameResolver\n",
"\n",
"from sandglass.time import _\n",
"from sandglass.time.filters import QueryFilter\n",
"from sandglass.time.filters import QueryFilterError\n",
"\n",
"\n",
"class CollectionByPrimaryKey(QueryFilter):\n",
" \"\"\"\n",
" Filter query results for a list of IDs.\n",
"\n",
" This filter only works on collections.\n",
"\n",
" \"\"\"\n",
" applies_to_admin = True\n",
"\n",
" # Supported request methods for this filter\n",
" supported_methods = ('GET', )\n",
"\n",
" def __init__(self, model, *args, **kwargs):\n",
" super(CollectionByPrimaryKey, self).__init__(*args, **kwargs)\n",
" self._model = model\n",
"\n",
" @reify\n",
" def model(self):\n",
" # When model is not a class resolve it to be a class\n",
" resolver = DottedNameResolver()\n",
" return resolver.maybe_resolve(self._model)\n",
"\n",
" def applies_to(self, resource):\n",
" if resource.request.method not in self.supported_methods:\n",
" return False\n",
" elif not resource.is_collection_request:\n",
" return False\n",
"\n",
" return super(CollectionByPrimaryKey, self).applies_to(resource)\n",
"\n",
" @staticmethod\n",
" def validate(values):\n",
" # Add a limit for the number of values\n",
" max_values = 100\n",
" if len(values) > max_values:\n",
" msg = _(u\"A maximum of {} IDs are allowed per request\")\n",
" return msg.format(max_values)\n",
"\n",
" # Check each value in list\n",
" for value in values:\n",
" try:\n",
" int(value)\n",
" except (ValueError, TypeError):\n",
" return _(u\"Invalid ID value\")\n",
"\n",
" def filter_query(self, query, request, resource):\n",
" if 'id' not in request.GET:\n",
" return query\n",
"\n",
" id_list = request.GET.getall('id')\n",
" message = self.validate(id_list)\n",
" if message:\n",
" raise QueryFilterError(message)\n",
"\n",
" return query.filter(self.model.id.in_(id_list))\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 63 | 0 |
from Robot import *
import unittest
class TestTransform(unittest.TestCase):
def test_stop(self):
state = {
"right_analog_x": 0,
"right_analog_y": 0,
}
res1 = map(state)
self.assertEqual(res1, (0, 0))
def test_full_forward(self):
state = {
"right_analog_x": 0,
"right_analog_y": 255,
}
res1 = map(state)
self.assertEqual(res1, (255, 255))
state = {
"right_analog_x": 0,
"right_analog_y": 127,
}
res1 = map(state)
self.assertEqual(res1, (127, 127))
def test_full_backward(self):
state = {
"right_analog_x": 0,
"right_analog_y": -255,
}
res1 = map(state)
self.assertEqual(res1, (-255, -255))
state = {
"right_analog_x": 0,
"right_analog_y": -127,
}
res1 = map(state)
self.assertEqual(res1, (-127, -127))
def test_turn_left(self):
state = {
"right_analog_x": -255,
"right_analog_y": 0,
}
res1 = map(state)
self.assertEqual(res1, (0, 255))
state = {
"right_analog_x": -255,
"right_analog_y": 255,
}
res1 = map(state)
self.assertEqual(res1, (0, 255))
def test_turn_right(self):
state = {
"right_analog_x": 255,
"right_analog_y": 255,
}
res1 = map(state)
self.assertEqual(res1, (255, 0))
state = {
"right_analog_x": 255,
"right_analog_y": 0,
}
res1 = map(state)
self.assertEqual(res1, (255, 0))
if __name__ == "__main__":
unittest.main()
| [
"from Robot import *\n",
"\n",
"import unittest\n",
"\n",
"\n",
"class TestTransform(unittest.TestCase):\n",
" def test_stop(self):\n",
" state = {\n",
" \"right_analog_x\": 0,\n",
" \"right_analog_y\": 0,\n",
" }\n",
"\n",
" res1 = map(state)\n",
"\n",
" self.assertEqual(res1, (0, 0))\n",
"\n",
" def test_full_forward(self):\n",
" state = {\n",
" \"right_analog_x\": 0,\n",
" \"right_analog_y\": 255,\n",
" }\n",
"\n",
" res1 = map(state)\n",
"\n",
" self.assertEqual(res1, (255, 255))\n",
"\n",
" state = {\n",
" \"right_analog_x\": 0,\n",
" \"right_analog_y\": 127,\n",
" }\n",
"\n",
" res1 = map(state)\n",
"\n",
" self.assertEqual(res1, (127, 127))\n",
"\n",
" def test_full_backward(self):\n",
" state = {\n",
" \"right_analog_x\": 0,\n",
" \"right_analog_y\": -255,\n",
" }\n",
"\n",
" res1 = map(state)\n",
"\n",
" self.assertEqual(res1, (-255, -255))\n",
"\n",
" state = {\n",
" \"right_analog_x\": 0,\n",
" \"right_analog_y\": -127,\n",
" }\n",
"\n",
" res1 = map(state)\n",
"\n",
" self.assertEqual(res1, (-127, -127))\n",
"\n",
" def test_turn_left(self):\n",
" state = {\n",
" \"right_analog_x\": -255,\n",
" \"right_analog_y\": 0,\n",
" }\n",
"\n",
" res1 = map(state)\n",
"\n",
" self.assertEqual(res1, (0, 255))\n",
"\n",
" state = {\n",
" \"right_analog_x\": -255,\n",
" \"right_analog_y\": 255,\n",
" }\n",
"\n",
" res1 = map(state)\n",
"\n",
" self.assertEqual(res1, (0, 255))\n",
"\n",
" def test_turn_right(self):\n",
" state = {\n",
" \"right_analog_x\": 255,\n",
" \"right_analog_y\": 255,\n",
" }\n",
"\n",
" res1 = map(state)\n",
"\n",
" self.assertEqual(res1, (255, 0))\n",
"\n",
" state = {\n",
" \"right_analog_x\": 255,\n",
" \"right_analog_y\": 0,\n",
" }\n",
"\n",
" res1 = map(state)\n",
"\n",
" self.assertEqual(res1, (255, 0))\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" unittest.main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 95 | 0 |
#!/usr/bin/python3
####################################################################################################
#
# Project : CHEMION Decoder
#
# Program name : decoder.py
#
# Author(s) : Jurre Groenendijk
# Jilles Groenendijk
#
# Date created : 20191220
#
# Purpose : Convert a 128 character UART string to output on Glasses
#
# Revision History :
#
# Date Author Version Revision (Date in YYYYMMDD format)
# 20191220 jurre 1.00 Initial release (Python2)
# 20191222 jilles 1.01 Converted to Python3 added parameter input and fault handling
# 20200103 jilles 1.02 Messages are 128 nibbles = 64 bytes, added argparse
# 20200111 jilles 1.03 Last line was not interpreted, fixed
# 20200111 jilles 1.04 Added reference to encoder
#
####################################################################################################
# On Android:
# 1. Install Chemion app
# 2. Developer options
# [x] Enable Bluetooth HCI snoop log
# [x] USB debugging
# 3. Connect to Chemion glasses
# 4. Upload animations
# 5. Disconnect from Chemion glasses
# 6. Developer options
# [ ] Enable Bluetooth HCI snoop log
# 7. Developer options
# [x] Enable Bluetooth HCI snoop log
#
# On linux:
# 1. $ apt install adb
# 2. $ adb shell
# 3. shell@device:/ $ cd /sdcard
# 4. shell@device:/ $ ls -l btsnoop_hci.log
# -rw-rw---- root sdcard_r 16 2020-01-03 15:34 btsnoop_hci.log
# 5. shell@device:/ $ exit
# 6. $ adb pull /sdcard/btsnoop_hci.log btsnoop_hci.pcap
# /sdcard/btsnoop_hci.log: 1 file pulled. 0.0 MB/s (16 bytes in 0.002s)
# 7. $ echo $(echo $(tshark -r btsnoop_hci.pcap -T jsonraw) | sed -e 's/,/\n/g' | grep btgatt.nordic.uart_tx_raw| cut -d\" -f4 | sed -e 's/^fa/~fa/g') | sed -e 's/ //g' -e 's/~/\n/g' > uartstrings.txt
# 8. $ for f in $(cat uartstrings.txt);do clear;./decoder.py $f -q;sleep .1;done
#
# X X X X X X X X X X
# X X X X X X X X X X X
# X X X X X X X X X X X
# X X X X X X X X X X X
# X X X X X X X X X X
# X X X X X X X X X X X
# X X X X X X X X X X X X
#
# Note: there is also an encoder available to create uartstrings
import sys
import argparse
line_length = 128
parser = argparse.ArgumentParser(description="decoder.py - v 1.04 by Jurre & Jilles Groenendijk")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("uartstring", type=str,help="the uartstring (128 character)")
args = parser.parse_args()
# Get string from stdin
data=args.uartstring
if( data=="-"):
data = input()
if(len(data)!=line_length):
if(not args.quiet):
print("decoder.py: error: length of uartstring("+str(len(data))+") !=",line_length)
quit()
# data = fa0300390100060000000000003f0f03c3c0c0f03ccf33c0c0f03ccf33c0c0f03ccf33c0c0f03ccf33c000f03ccf33c0c03f0f03c3fcc0000000000000cb55a9
header = data[:14] # fa030039010006
body = data[14:122] # 0000000000003f0f03c3c0c0f03ccf33c0c0f03ccf33c0c0f03ccf33c0c0f03ccf33c000f03ccf33c0c03f0f03c3fcc0000000000000
crc = data[122:124] # cb
footer = data[124:] # 55a9
if(args.debug):
print("DEBUG")
print("Header: ", header," (",len(header),")")
print("Body : ", body," (",len(body),")")
print("CRC : ", crc," (",len(crc),")")
print("Footer: ", footer," (",len(footer),")")
print()
all_bits="".join(["0"*(4-len(str(bin(int(x,16)))[2:]))+str(bin(int(x,16)))[2:] for x in body])
all_pixels = [all_bits[x:x+2] for x in range(0, len(all_bits),2)]
for x in range(0, 9):
if( args.verbose ): print( " ", x, "| " , end="" )
for y in range(0, 24):
pixel=all_pixels[x*24+y]
if(pixel=="00"): print( " ", end="" )
if(pixel=="01"): print( "- ", end="")
if(pixel=="10"): print( "x ", end="")
if(pixel=="11"): print( "X ", end="")
print()
if( args.verbose ):
print( " +------------------------------------------------" )
print( " ",end="" )
for y in range(0, 24):
print(int(y / 10),end=" ")
print()
print( " ",end="" )
for y in range(0, 24):
print((y % 10),end=" ")
print()
## EOF ##
| [
"#!/usr/bin/python3\n",
"####################################################################################################\n",
"#\n",
"# Project : CHEMION Decoder\n",
"#\n",
"# Program name : decoder.py\n",
"#\n",
"# Author(s) : Jurre Groenendijk\n",
"# Jilles Groenendijk\n",
"#\n",
"# Date created : 20191220\n",
"#\n",
"# Purpose : Convert a 128 character UART string to output on Glasses\n",
"#\n",
"# Revision History :\n",
"#\n",
"# Date Author Version Revision (Date in YYYYMMDD format) \n",
"# 20191220 jurre 1.00 Initial release (Python2)\n",
"# 20191222 jilles 1.01 Converted to Python3 added parameter input and fault handling \n",
"# 20200103 jilles 1.02 Messages are 128 nibbles = 64 bytes, added argparse\n",
"# 20200111 jilles 1.03 Last line was not interpreted, fixed\n",
"# 20200111 jilles 1.04 Added reference to encoder\n",
"#\n",
"####################################################################################################\n",
"\n",
"# On Android:\n",
"# 1. Install Chemion app\n",
"# 2. Developer options\n",
"# [x] Enable Bluetooth HCI snoop log\n",
"# [x] USB debugging\n",
"# 3. Connect to Chemion glasses\n",
"# 4. Upload animations\n",
"# 5. Disconnect from Chemion glasses\n",
"# 6. Developer options\n",
"# [ ] Enable Bluetooth HCI snoop log\n",
"# 7. Developer options\n",
"# [x] Enable Bluetooth HCI snoop log\n",
"#\n",
"# On linux:\n",
"# 1. $ apt install adb\n",
"# 2. $ adb shell\n",
"# 3. shell@device:/ $ cd /sdcard\n",
"# 4. shell@device:/ $ ls -l btsnoop_hci.log \n",
"# -rw-rw---- root sdcard_r 16 2020-01-03 15:34 btsnoop_hci.log\n",
"# 5. shell@device:/ $ exit\n",
"# 6. $ adb pull /sdcard/btsnoop_hci.log btsnoop_hci.pcap\n",
"# /sdcard/btsnoop_hci.log: 1 file pulled. 0.0 MB/s (16 bytes in 0.002s)\n",
"# 7. $ echo $(echo $(tshark -r btsnoop_hci.pcap -T jsonraw) | sed -e 's/,/\\n/g' | grep btgatt.nordic.uart_tx_raw| cut -d\\\" -f4 | sed -e 's/^fa/~fa/g') | sed -e 's/ //g' -e 's/~/\\n/g' > uartstrings.txt\n",
"# 8. $ for f in $(cat uartstrings);do clear;./decoder.py $f -q;sleep .1;done\n",
"# \n",
"# X X X X X X X X X X \n",
"# X X X X X X X X X X X \n",
"# X X X X X X X X X X X \n",
"# X X X X X X X X X X X \n",
"# X X X X X X X X X X \n",
"# X X X X X X X X X X X \n",
"# X X X X X X X X X X X X \n",
"#\n",
"# Note: there is also an encoder available to create uartstrings\n",
"\n",
"import sys\n",
"import argparse\n",
"\n",
"line_length = 128\n",
"\n",
"parser = argparse.ArgumentParser(description=\"decoder.py - v 1.04 by Jurre & Jilles Groenendijk\")\n",
"parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n",
"parser.add_argument(\"-d\", \"--debug\", action=\"store_true\")\n",
"parser.add_argument(\"-q\", \"--quiet\", action=\"store_true\")\n",
"parser.add_argument(\"uartstring\", type=str,help=\"the uartstring (128 character)\")\n",
"args = parser.parse_args()\n",
"\n",
"# Get string from stdin\n",
"data=args.uartstring\n",
"if( data==\"-\"):\n",
" data = input()\n",
"\n",
"if(len(data)!=line_length):\n",
" if(not args.quiet):\n",
" print(\"decoder.py: error: length of uartstring(\"+str(len(data))+\") !=\",line_length)\n",
" quit()\n",
"\n",
"# data = fa0300390100060000000000003f0f03c3c0c0f03ccf33c0c0f03ccf33c0c0f03ccf33c0c0f03ccf33c000f03ccf33c0c03f0f03c3fcc0000000000000cb55a9\n",
"header = data[:14] # fa030039010006\n",
"body = data[14:122] # 0000000000003f0f03c3c0c0f03ccf33c0c0f03ccf33c0c0f03ccf33c0c0f03ccf33c000f03ccf33c0c03f0f03c3fcc0000000000000\n",
"crc = data[122:124] # cb\n",
"footer = data[124:] # 55a9\n",
"if(args.debug):\n",
" print(\"DEBUG\")\n",
" print(\"Header: \", header,\" (\",len(header),\")\")\n",
" print(\"Body : \", body,\" (\",len(body),\")\")\n",
" print(\"CRC : \", crc,\" (\",len(crc),\")\")\n",
" print(\"Footer: \", footer,\" (\",len(footer),\")\")\n",
" print()\n",
"\n",
"all_bits=\"\".join([\"0\"*(4-len(str(bin(int(x,16)))[2:]))+str(bin(int(x,16)))[2:] for x in body])\n",
"\n",
"all_pixels = [all_bits[x:x+2] for x in range(0, len(all_bits),2)]\n",
" \n",
"for x in range(0, 9):\n",
" if( args.verbose ): print( \" \", x, \"| \" , end=\"\" )\n",
" for y in range(0, 24):\n",
" pixel=all_pixels[x*24+y]\n",
" if(pixel==\"00\"): print( \" \", end=\"\" )\n",
" if(pixel==\"01\"): print( \"- \", end=\"\")\n",
" if(pixel==\"10\"): print( \"x \", end=\"\")\n",
" if(pixel==\"11\"): print( \"X \", end=\"\")\n",
" print()\n",
"\n",
"if( args.verbose ):\n",
" print( \" +------------------------------------------------\" )\n",
"\n",
" print( \" \",end=\"\" )\n",
" for y in range(0, 24):\n",
" print(int(y / 10),end=\" \")\n",
" print()\n",
"\n",
" print( \" \",end=\"\" )\n",
" for y in range(0, 24):\n",
" print((y % 10),end=\" \")\n",
" print()\n",
"\n",
"## EOF ##\n"
] | [
0,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0136986301369863,
0,
0.02,
0.011235955056179775,
0,
0,
0,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013888888888888888,
0,
0,
0,
0,
0.0049504950495049506,
0,
0.02,
0.018518518518518517,
0.018518518518518517,
0.018518518518518517,
0.018518518518518517,
0.018518518518518517,
0.018518518518518517,
0.018518518518518517,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0.047619047619047616,
0.125,
0.058823529411764705,
0,
0.03571428571428571,
0.045454545454545456,
0.022727272727272728,
0.1111111111111111,
0,
0.007246376811594203,
0,
0.014925373134328358,
0.07142857142857142,
0,
0,
0.058823529411764705,
0.08163265306122448,
0.08888888888888889,
0.09302325581395349,
0.08163265306122448,
0.1,
0,
0.042105263157894736,
0,
0.015151515151515152,
0.3333333333333333,
0,
0.1320754716981132,
0.04,
0.034482758620689655,
0.09302325581395349,
0.07142857142857142,
0.07142857142857142,
0.07142857142857142,
0.1,
0,
0.1,
0.04477611940298507,
0,
0.14814814814814814,
0.04,
0.03225806451612903,
0.1,
0,
0.14814814814814814,
0.04,
0.03571428571428571,
0.1,
0,
0.1
] | 123 | 0.02606 |
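decoder.py in the row above expands each hex nibble of the 108-character body into 4 bits and then reads the stream two bits per pixel. The fragment below replays that step for the single body byte "3f" taken from the example message in the script's own comments; it is illustrative only.
body_byte = "3f"  # one byte from the example body quoted in the comments above

# Each hex nibble expands to 4 bits, mirroring the all_bits join in decoder.py.
bits = "".join(bin(int(nibble, 16))[2:].zfill(4) for nibble in body_byte)
print(bits)        # 00111111

# Two bits per pixel, same slicing as all_pixels in decoder.py.
pixels = [bits[i:i + 2] for i in range(0, len(bits), 2)]
print(pixels)      # ['00', '11', '11', '11']

# Same glyph mapping the script uses when printing the 9x24 frame.
glyph = {"00": "  ", "01": "- ", "10": "x ", "11": "X "}
print("".join(glyph[p] for p in pixels))   # "  X X X "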
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import os
import pwd
import grp
import ldap
from ipaserver.install import service
from ipaserver.install import installutils
from ipapython.ipa_log_manager import *
from ipapython.dn import DN
from ipapython import sysrestore, ipautil, ipaldap
from ipaplatform.paths import paths
from ipaplatform import services
from ipalib import errors, api
class ODSExporterInstance(service.Service):
def __init__(self, fstore=None, dm_password=None, ldapi=False,
start_tls=False, autobind=ipaldap.AUTOBIND_ENABLED):
service.Service.__init__(
self, "ipa-ods-exporter",
service_desc="IPA OpenDNSSEC exporter daemon",
dm_password=dm_password,
ldapi=ldapi,
autobind=autobind,
start_tls=start_tls
)
self.dm_password = dm_password
self.ods_uid = None
self.ods_gid = None
self.enable_if_exists = False
if fstore:
self.fstore = fstore
else:
self.fstore = sysrestore.FileStore(paths.SYSRESTORE)
suffix = ipautil.dn_attribute_property('_suffix')
def create_instance(self, fqdn, realm_name):
self.backup_state("enabled", self.is_enabled())
self.backup_state("running", self.is_running())
self.fqdn = fqdn
self.realm = realm_name
self.suffix = ipautil.realm_to_suffix(self.realm)
try:
self.stop()
except:
pass
# get a connection to the DS
self.ldap_connect()
# checking status step must be first
self.step("checking status", self.__check_dnssec_status)
self.step("setting up DNS Key Exporter", self.__setup_key_exporter)
self.step("setting up kerberos principal", self.__setup_principal)
self.step("disabling default signer daemon", self.__disable_signerd)
self.step("starting DNS Key Exporter", self.__start)
self.step("configuring DNS Key Exporter to start on boot", self.__enable)
self.start_creation()
def __check_dnssec_status(self):
ods_enforcerd = services.knownservices.ods_enforcerd
try:
self.ods_uid = pwd.getpwnam(ods_enforcerd.get_user_name()).pw_uid
except KeyError:
raise RuntimeError("OpenDNSSEC UID not found")
try:
self.ods_gid = grp.getgrnam(ods_enforcerd.get_group_name()).gr_gid
except KeyError:
raise RuntimeError("OpenDNSSEC GID not found")
def __enable(self):
try:
self.ldap_enable('DNSKeyExporter', self.fqdn, self.dm_password,
self.suffix)
except errors.DuplicateEntry:
root_logger.error("DNSKeyExporter service already exists")
def __setup_key_exporter(self):
installutils.set_directive(paths.SYSCONFIG_IPA_ODS_EXPORTER,
'SOFTHSM2_CONF',
paths.DNSSEC_SOFTHSM2_CONF,
quotes=False, separator='=')
def __setup_principal(self):
assert self.ods_uid is not None
dns_exporter_principal = "ipa-ods-exporter/" + self.fqdn + "@" + self.realm
installutils.kadmin_addprinc(dns_exporter_principal)
# Store the keytab on disk
installutils.create_keytab(paths.IPA_ODS_EXPORTER_KEYTAB, dns_exporter_principal)
p = self.move_service(dns_exporter_principal)
if p is None:
# the service has already been moved, perhaps we're doing a DNS reinstall
dns_exporter_principal_dn = DN(
('krbprincipalname', dns_exporter_principal),
('cn', 'services'), ('cn', 'accounts'), self.suffix)
else:
dns_exporter_principal_dn = p
# Make sure access is strictly reserved to the ods user
os.chmod(paths.IPA_ODS_EXPORTER_KEYTAB, 0o440)
os.chown(paths.IPA_ODS_EXPORTER_KEYTAB, 0, self.ods_gid)
dns_group = DN(('cn', 'DNS Servers'), ('cn', 'privileges'),
('cn', 'pbac'), self.suffix)
mod = [(ldap.MOD_ADD, 'member', dns_exporter_principal_dn)]
try:
self.admin_conn.modify_s(dns_group, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as e:
root_logger.critical("Could not modify principal's %s entry: %s"
% (dns_exporter_principal_dn, str(e)))
raise
# limit-free connection
mod = [(ldap.MOD_REPLACE, 'nsTimeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsSizeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsIdleTimeout', '-1'),
(ldap.MOD_REPLACE, 'nsLookThroughLimit', '-1')]
try:
self.admin_conn.modify_s(dns_exporter_principal_dn, mod)
except Exception as e:
root_logger.critical("Could not set principal's %s LDAP limits: %s"
% (dns_exporter_principal_dn, str(e)))
raise
def __disable_signerd(self):
signerd_service = services.knownservices.ods_signerd
self.backup_state("singerd_running", signerd_service.is_running())
self.backup_state("singerd_enabled", signerd_service.is_enabled())
# disable default opendnssec signer daemon
signerd_service.stop()
signerd_service.mask()
def __start(self):
self.start()
def remove_service(self):
dns_exporter_principal = ("ipa-ods-exporter/%s@%s" % (self.fqdn,
self.realm))
try:
api.Command.service_del(dns_exporter_principal)
except errors.NotFound:
pass
def uninstall(self):
if not self.is_configured():
return
self.print_msg("Unconfiguring %s" % self.service_name)
# just eat states
self.restore_state("running")
self.restore_state("enabled")
# stop and disable service (IPA service, we do not need it anymore)
self.disable()
self.stop()
# restore state of dnssec default signer daemon
signerd_enabled = self.restore_state("singerd_enabled")
signerd_running = self.restore_state("singerd_running")
signerd_service = services.knownservices.ods_signerd
signerd_service.unmask()
# service was stopped and disabled by setup
if signerd_enabled:
signerd_service.enable()
if signerd_running:
signerd_service.start()
| [
"#\n",
"# Copyright (C) 2014 FreeIPA Contributors see COPYING for license\n",
"#\n",
"\n",
"import os\n",
"import pwd\n",
"import grp\n",
"\n",
"import ldap\n",
"\n",
"from ipaserver.install import service\n",
"from ipaserver.install import installutils\n",
"from ipapython.ipa_log_manager import *\n",
"from ipapython.dn import DN\n",
"from ipapython import sysrestore, ipautil, ipaldap\n",
"from ipaplatform.paths import paths\n",
"from ipaplatform import services\n",
"from ipalib import errors, api\n",
"\n",
"\n",
"class ODSExporterInstance(service.Service):\n",
" def __init__(self, fstore=None, dm_password=None, ldapi=False,\n",
" start_tls=False, autobind=ipaldap.AUTOBIND_ENABLED):\n",
" service.Service.__init__(\n",
" self, \"ipa-ods-exporter\",\n",
" service_desc=\"IPA OpenDNSSEC exporter daemon\",\n",
" dm_password=dm_password,\n",
" ldapi=ldapi,\n",
" autobind=autobind,\n",
" start_tls=start_tls\n",
" )\n",
" self.dm_password = dm_password\n",
" self.ods_uid = None\n",
" self.ods_gid = None\n",
" self.enable_if_exists = False\n",
"\n",
" if fstore:\n",
" self.fstore = fstore\n",
" else:\n",
" self.fstore = sysrestore.FileStore(paths.SYSRESTORE)\n",
"\n",
" suffix = ipautil.dn_attribute_property('_suffix')\n",
"\n",
" def create_instance(self, fqdn, realm_name):\n",
" self.backup_state(\"enabled\", self.is_enabled())\n",
" self.backup_state(\"running\", self.is_running())\n",
" self.fqdn = fqdn\n",
" self.realm = realm_name\n",
" self.suffix = ipautil.realm_to_suffix(self.realm)\n",
"\n",
" try:\n",
" self.stop()\n",
" except:\n",
" pass\n",
"\n",
" # get a connection to the DS\n",
" self.ldap_connect()\n",
" # checking status step must be first\n",
" self.step(\"checking status\", self.__check_dnssec_status)\n",
" self.step(\"setting up DNS Key Exporter\", self.__setup_key_exporter)\n",
" self.step(\"setting up kerberos principal\", self.__setup_principal)\n",
" self.step(\"disabling default signer daemon\", self.__disable_signerd)\n",
" self.step(\"starting DNS Key Exporter\", self.__start)\n",
" self.step(\"configuring DNS Key Exporter to start on boot\", self.__enable)\n",
" self.start_creation()\n",
"\n",
" def __check_dnssec_status(self):\n",
" ods_enforcerd = services.knownservices.ods_enforcerd\n",
"\n",
" try:\n",
" self.ods_uid = pwd.getpwnam(ods_enforcerd.get_user_name()).pw_uid\n",
" except KeyError:\n",
" raise RuntimeError(\"OpenDNSSEC UID not found\")\n",
"\n",
" try:\n",
" self.ods_gid = grp.getgrnam(ods_enforcerd.get_group_name()).gr_gid\n",
" except KeyError:\n",
" raise RuntimeError(\"OpenDNSSEC GID not found\")\n",
"\n",
" def __enable(self):\n",
"\n",
" try:\n",
" self.ldap_enable('DNSKeyExporter', self.fqdn, self.dm_password,\n",
" self.suffix)\n",
" except errors.DuplicateEntry:\n",
" root_logger.error(\"DNSKeyExporter service already exists\")\n",
"\n",
" def __setup_key_exporter(self):\n",
" installutils.set_directive(paths.SYSCONFIG_IPA_ODS_EXPORTER,\n",
" 'SOFTHSM2_CONF',\n",
" paths.DNSSEC_SOFTHSM2_CONF,\n",
" quotes=False, separator='=')\n",
"\n",
" def __setup_principal(self):\n",
" assert self.ods_uid is not None\n",
" dns_exporter_principal = \"ipa-ods-exporter/\" + self.fqdn + \"@\" + self.realm\n",
" installutils.kadmin_addprinc(dns_exporter_principal)\n",
"\n",
" # Store the keytab on disk\n",
" installutils.create_keytab(paths.IPA_ODS_EXPORTER_KEYTAB, dns_exporter_principal)\n",
" p = self.move_service(dns_exporter_principal)\n",
" if p is None:\n",
" # the service has already been moved, perhaps we're doing a DNS reinstall\n",
" dns_exporter_principal_dn = DN(\n",
" ('krbprincipalname', dns_exporter_principal),\n",
" ('cn', 'services'), ('cn', 'accounts'), self.suffix)\n",
" else:\n",
" dns_exporter_principal_dn = p\n",
"\n",
" # Make sure access is strictly reserved to the ods user\n",
" os.chmod(paths.IPA_ODS_EXPORTER_KEYTAB, 0o440)\n",
" os.chown(paths.IPA_ODS_EXPORTER_KEYTAB, 0, self.ods_gid)\n",
"\n",
" dns_group = DN(('cn', 'DNS Servers'), ('cn', 'privileges'),\n",
" ('cn', 'pbac'), self.suffix)\n",
" mod = [(ldap.MOD_ADD, 'member', dns_exporter_principal_dn)]\n",
"\n",
" try:\n",
" self.admin_conn.modify_s(dns_group, mod)\n",
" except ldap.TYPE_OR_VALUE_EXISTS:\n",
" pass\n",
" except Exception as e:\n",
" root_logger.critical(\"Could not modify principal's %s entry: %s\"\n",
" % (dns_exporter_principal_dn, str(e)))\n",
" raise\n",
"\n",
" # limit-free connection\n",
"\n",
" mod = [(ldap.MOD_REPLACE, 'nsTimeLimit', '-1'),\n",
" (ldap.MOD_REPLACE, 'nsSizeLimit', '-1'),\n",
" (ldap.MOD_REPLACE, 'nsIdleTimeout', '-1'),\n",
" (ldap.MOD_REPLACE, 'nsLookThroughLimit', '-1')]\n",
" try:\n",
" self.admin_conn.modify_s(dns_exporter_principal_dn, mod)\n",
" except Exception as e:\n",
" root_logger.critical(\"Could not set principal's %s LDAP limits: %s\"\n",
" % (dns_exporter_principal_dn, str(e)))\n",
" raise\n",
"\n",
" def __disable_signerd(self):\n",
" signerd_service = services.knownservices.ods_signerd\n",
"\n",
" self.backup_state(\"singerd_running\", signerd_service.is_running())\n",
" self.backup_state(\"singerd_enabled\", signerd_service.is_enabled())\n",
"\n",
" # disable default opendnssec signer daemon\n",
" signerd_service.stop()\n",
" signerd_service.mask()\n",
"\n",
" def __start(self):\n",
" self.start()\n",
"\n",
" def remove_service(self):\n",
" dns_exporter_principal = (\"ipa-ods-exporter/%s@%s\" % (self.fqdn,\n",
" self.realm))\n",
" try:\n",
" api.Command.service_del(dns_exporter_principal)\n",
" except errors.NotFound:\n",
" pass\n",
"\n",
" def uninstall(self):\n",
" if not self.is_configured():\n",
" return\n",
"\n",
" self.print_msg(\"Unconfiguring %s\" % self.service_name)\n",
"\n",
" # just eat states\n",
" self.restore_state(\"running\")\n",
" self.restore_state(\"enabled\")\n",
"\n",
" # stop and disable service (IPA service, we do not need it anymore)\n",
" self.disable()\n",
" self.stop()\n",
"\n",
" # restore state of dnssec default signer daemon\n",
" signerd_enabled = self.restore_state(\"singerd_enabled\")\n",
" signerd_running = self.restore_state(\"singerd_runnning\")\n",
" signerd_service = services.knownservices.ods_signerd\n",
"\n",
" signerd_service.unmask()\n",
"\n",
" # service was stopped and disabled by setup\n",
" if signerd_enabled:\n",
" signerd_service.enable()\n",
"\n",
" if signerd_running:\n",
" signerd_service.start()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 187 | 0.000585 |
# -*- coding: latin1 -*-
################################################################################################
#
#
import tweepy, datetime, sys, time, json, os, os.path, shutil, struct, random
import multi_oauth
#Script that contains the keys for Twitter authentication
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
##		Status - Version 6 - Collect Twitter friends
##
##						6.0 - Uses the set of egos from the DATASET directory - it is only a subset to make developing the work easier.
##								Once the collection of this subset is finished, collection can go back to using version 5.
##						6.1	Improved handling of errors returned by the API
##
##
##						SAVE ONLY WHAT IS NEEDED IN ORDER TO SAVE DISK SPACE. Collecting full tweets takes up a lot of space.
##
##						NOTE: Twitter has blocked several accounts on suspicion of spam... be extra careful with the scripts that were created.
##
##						STATUS - Collecting - OK - Save BINARY files!! containing the favorited tweets from the authors of the tweets favorited by the egos.
##
##						STATUS - Redo the collection until there are no more "Rate Limit Exceeded" messages - each such message means a user was left uncollected
##
##
######################################################################################################################################################################
######################################################################################################################################################################
#
# Performs authentication of the application.
#
######################################################################################################################################################################
def autentication(auths):
global key
key += 1
if (key >= key_limit):
key = key_init
print
print("######################################################################")
print ("Autenticando usando chave número: "+str(key)+"/"+str(key_limit))
print("######################################################################\n")
time.sleep(wait)
api_key = tweepy.API(auths[key])
return (api_key)
######################################################################################################################################################################
#
# Converts datetime values so they can be stored in JSON format
#
######################################################################################################################################################################
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
encoded_object = list(obj.timetuple())[0:6]
else:
encoded_object =json.JSONEncoder.default(self, obj)
return encoded_object
################################################################################################
# Reads the binary files containing the friend ids
################################################################################################
def read_arq_bin(file):
with open(file, 'r') as f:
f.seek(0,2)
tamanho = f.tell()
f.seek(0)
authors_list = set()
while f.tell() < tamanho:
buffer = f.read(favorites_struct.size)
tweet, user = favorites_struct.unpack(buffer)
authors_list.add(user)
return authors_list
######################################################################################################################################################################
#
# Writes the error to a dedicated file
#
######################################################################################################################################################################
def save_error(user,reason):
	agora = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M')				# Gets the current instant in the form YearMonthDayHourMinute
error={}
	with open(error_dir+"timeline_collect.err", "a+") as outfile:								# Opens the file for appending at the end
error = {'user':user,'reason':str(reason) ,'date':agora, 'key':key}
outfile.write(json.dumps(error, cls=DateTimeEncoder, separators=(',', ':'))+"\n")
print error
######################################################################################################################################################################
#
# Tweepy - Performs the search and returns the favorites of a specific user
#
######################################################################################################################################################################
def get_favorites(j,k,l,user):										# Collects the favorites
global key
global dictionary
global api
global i
favorites = []
try:
		for page in tweepy.Cursor(api.favorites,id=user, count=200).pages(16):				# Returns the user's favorites
for tweet in page:
favorites.append(tweet)
return (favorites)
	except tweepy.error.RateLimitError as e:
		print("Limite de acesso à API excedido. User: "+str(user)+" - Autenticando novamente... "+str(e))
		api = autentication(auths)
except tweepy.error.TweepError as e:
print ("ERRO - Ego nº: "+str(j)+" - Alter ("+str(k)+"/"+str(l)+"): "+str(user))
try:
if e.reason == "Twitter error response: status code = 404": # Usuários não existentes ou não encontrados
dictionary[user] = user # Insere o usuário coletado na tabela em memória
with open(data_dir+str(user)+".dat", "w") as f: # Cria arquivo vazio
print ("Usuário não encontrado. User: "+str(user)+" - Arquivo criado com sucesso!")
i +=1
elif e.reason == "Twitter error response: status code = 401": # Usuários não existentes ou não encontrados
save_error(user,e.reason)
api = autentication(auths)
			elif e.message == 'Not authorized.': # Unauthorized users
				dictionary[user] = user											# Adds the collected user to the in-memory table
				with open(data_dir+str(user)+".dat", "w") as f:			# Creates an empty file
print ("Usuário não autorizado. User: "+str(user)+" - Arquivo criado com sucesso!")
i +=1
elif e.message[0]['code'] == 32 or e.message[0]['code'] == 215 or e.message[0]['code'] == 429 or e.message[0]['code'] == 401:
save_error(user,e.message)
key = random.randint(key_init,key_limit)
api = autentication(auths)
			elif e.message[0]['code'] == 34 or e.message[0]['code'] == 404:									# Users that do not exist or were not found
				dictionary[user] = user											# Adds the collected user to the in-memory table
				with open(data_dir+str(user)+".dat", "w") as f:			# Creates an empty file
print ("Usuário inexistente. User: "+str(user)+" - Arquivo criado com sucesso!")
i +=1
else:
save_error(user,e)
api = autentication(auths)
except Exception as e2:
save_error(user,e2)
api = autentication(auths)
######################################################################################################################################################################
#
# Gets the users' favorites
#
######################################################################################################################################################################
def save_favorites(j,k,l,user): # j = number of the ego being collected - k = number of the alter being checked - l = size of the ego's friends list
	global i	# number of users whose files have already been collected / number of files in the directory
	# Dictionary - hash table containing the users already collected
	global dictionary
	#Calls the function and receives the user's list of tweets as the return value
	t = 0 												# Number of tweets per user
favorites = get_favorites(j,k,l,user)
if favorites:
try:
with open(data_dir+str(user)+".dat", "w+b") as f:
for status in favorites:
t+=1
				f.write(favorites_struct.pack(status.id,status.user.id))						# Writes the tweet id and author id to the user's binary file
###
#			tweets_list = read_arq_bin(data_dir+str(user)+".dat") # Function to convert the binary back into a json-format string.
# print tweets_list
####
			dictionary[user] = user									# Adds the collected user to the in-memory table
i +=1
print ("Ego nº: "+str(j)+" - Alter ("+str(k)+"/"+str(l)+"): "+str(user)+" coletados com sucesso. "+str(t)+" tweets. Total coletados: "+str(i))
except Exception as e:
if e.message:
save_error(user,e.message)
else:
save_error(user,str(e))
if os.path.exists(data_dir+str(user)+".dat"):
os.remove(data_dir+str(user)+".dat")
print ("Arquivo removido co sucesso...")
######################################################################################################################################################################
######################################################################################################################################################################
#
# Main method of the program.
# Tests and collects the favorites of the user specified in the file.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
	global i 													# number of users whose files have already been collected / number of files in the directory
	j = 0															# Ordinal number of the ego currently being used for the favorites collection
	for file in os.listdir(egos_favorites_dir):					# Goes through the list of collected egos and, for each one, looks up the retweet authors of the alters listed in the ego's file.
		j+=1
		authors_list = read_arq_bin(egos_favorites_dir+file) # Function to convert the binary back into a json-format string.
		l = len(authors_list)										# Size/number of the ego's retweet authors
		k = 0																#Ordinal number of the alter whose friends list is being collected
for authors in authors_list:
k+=1
if not dictionary.has_key(authors):
				save_favorites(j,k,l,authors)							#Starts the search function
# print ("Ego: "+str(j)+" - "+str(len(authors_list))+" alters.")
print
print("######################################################################")
print("Coleta finalizada!")
print("######################################################################\n")
######################################################################################################################################################################
#
# PROGRAM START
#
######################################################################################################################################################################
################################### SET WHETHER THIS IS A TEST OR NOT!!! ### ['auths_ok'] OR ['auths_test'] ################
oauth_keys = multi_oauth.keys()
auths = oauth_keys['auths_ok']
################################### CONFIGURE THE LINES BELOW ####################################################
######################################################################################################################
qtde_egos = 'full' 		# 50, 100, 500 or full
######################################################################################################################
######################################################################################################################
key_init = 0					################################################################ These two lines assign the keys to each script
key_limit = len(auths)		################################################################ Uses all the keys (size of the key list)
key = random.randint(key_init,key_limit) ################################################## Starts the script from a random key in the key set
egos_favorites_dir = "/home/amaury/dataset/n3/egos/bin/"												# File containing the list of ego users already collected
data_dir = "/home/amaury/coleta/n3/alters/"+str(qtde_egos)+"/bin/"								# Directory used to store the files
error_dir = "/home/amaury/coleta/n3/alters/"+str(qtde_egos)+"/error/" 							# Directory used to store the error files
formato = 'll'				################################################################### A long for the code ('l') and then the char array of X positions:
favorites_struct = struct.Struct(formato) ################################################# Initializes the struct object so the specific format can be stored in the binary file
wait = 5
dictionary = {}				#################################################### {key:value} table to make it easy to look up the users already collected
######################################################################################################################
######################################################################################################################
######################################################################################################################
#Creates the directories used to store the files
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(error_dir):
os.makedirs(error_dir)
###### Initializing the dictionary - hash table built from the files already created.
print
print("######################################################################")
print ("Criando tabela hash...")
i = 0	#Counts how many users have already been collected (all files in the directory)
for file in os.listdir(data_dir):
user_id = file.split(".dat")
user_id = long(user_id[0])
dictionary[user_id] = user_id
i+=1
print ("Tabela hash criada com sucesso...")
print("######################################################################\n")
#Authentication
api = autentication(auths)
#Runs the main method
if __name__ == "__main__": main() | [
"# -*- coding: latin1 -*-\n",
"################################################################################################\n",
"#\t\n",
"#\n",
"import tweepy, datetime, sys, time, json, os, os.path, shutil, time, struct, random\n",
"import multi_oauth\n",
"#Script que contém as chaves para autenticação do twitter\n",
"\n",
"reload(sys)\n",
"sys.setdefaultencoding('utf-8')\n",
"\n",
"######################################################################################################################################################################\n",
"##\t\tStatus - Versão 6 - Coletar amigos do Twitter\n",
"##\t\t\t\t\t\t\n",
"##\t\t\t\t\t\t6.0 - Usando o conjunto de egos do diretório DATASET - è apenas um subconjunto para facilitar o desenvolvimento do trabalho..\n",
"##\t\t\t\t\t\t\t\tAssim que concluída a coleta desse subconjunto, pode-se voltar a coletar usando a versão 5.\n",
"##\t\t\t\t\t\t6.1\tMelhoria na recepção de erros da API\n",
"##\n",
"##\t\t\t\t\n",
"##\t\t\t\t\t\tSALVAR APENAS O NECESSÁRIO PARA ECONOMIZAR ESPAÇO EM DISCO. Coletar tweets completos ocupa muito espaço.\n",
"##\n",
"##\t\t\t\t\t\tOBS> Twitter bloqueou diversas contas por suspeita de spam... redobrar as atenções com os scripts criados.\t\t\t\t\n",
"##\n",
"##\t\t\t\t\t\tSTATUS - Coletando - OK - Salvar arquivos BINÀRIOS!! contendo os a tweets favoritados a partir dos autores do tweets favoritados pelo egos.\n",
"##\n",
"##\t\t\t\t\t\tSTATUS - Refazer a coleta até que não tenha nenhuma mensagem de \"Rate Limit Exceeded\" - A cada mensagem há um usuário que ficou sem ser coletada\n",
"##\n",
"## \n",
"######################################################################################################################################################################\n",
"\n",
"######################################################################################################################################################################\n",
"#\n",
"# Realiza autenticação da aplicação.\n",
"#\n",
"######################################################################################################################################################################\n",
"\n",
"def autentication(auths):\n",
"\tglobal key\n",
"\tkey += 1\n",
"\tif (key >= key_limit):\n",
"\t\tkey = key_init\n",
"\tprint\n",
"\tprint(\"######################################################################\")\n",
"\tprint (\"Autenticando usando chave número: \"+str(key)+\"/\"+str(key_limit))\n",
"\tprint(\"######################################################################\\n\")\n",
"\ttime.sleep(wait)\n",
"\tapi_key = tweepy.API(auths[key])\n",
"\treturn (api_key)\n",
"\n",
"######################################################################################################################################################################\n",
"#\n",
"# Converte formato data para armazenar em formato JSON\n",
"#\n",
"######################################################################################################################################################################\n",
"class DateTimeEncoder(json.JSONEncoder):\n",
" def default(self, obj):\n",
" if isinstance(obj, datetime.datetime):\n",
" encoded_object = list(obj.timetuple())[0:6]\n",
" else:\n",
" encoded_object =json.JSONEncoder.default(self, obj)\n",
" return encoded_object\n",
"\n",
"################################################################################################\n",
"# Imprime os arquivos binários com os ids dos amigos\n",
"################################################################################################\n",
"def read_arq_bin(file):\n",
"\twith open(file, 'r') as f:\t \n",
"\t\tf.seek(0,2)\n",
"\t\ttamanho = f.tell()\n",
"\t\tf.seek(0)\n",
"\t\tauthors_list = set()\n",
"\t\twhile f.tell() < tamanho:\n",
"\t\t\tbuffer = f.read(favorites_struct.size)\n",
"\t\t\ttweet, user = favorites_struct.unpack(buffer)\n",
"\t\t\tauthors_list.add(user)\n",
"\treturn authors_list\n",
"\n",
"######################################################################################################################################################################\n",
"#\n",
"# Grava o erro num arquivo específco \n",
"#\n",
"######################################################################################################################################################################\n",
"def save_error(user,reason):\n",
"\tagora = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M')\t\t\t\t# Recupera o instante atual na forma AnoMesDiaHoraMinuto\n",
"\terror={}\n",
"\twith open(error_dir+\"timeline_collect.err\", \"a+\") as outfile:\t\t\t\t\t\t\t\t# Abre o arquivo para gravação no final do arquivo\n",
"\t\terror = {'user':user,'reason':str(reason) ,'date':agora, 'key':key}\n",
"\t\toutfile.write(json.dumps(error, cls=DateTimeEncoder, separators=(',', ':'))+\"\\n\")\n",
"\tprint error\n",
"\t\n",
"######################################################################################################################################################################\n",
"#\n",
"# Tweepy - Realiza a busca e devolve os favofitos de um usuário específico \n",
"#\n",
"######################################################################################################################################################################\n",
"def get_favorites(j,k,l,user):\t\t\t\t\t\t\t\t\t\t\t\t#Coleta dos favoritos\n",
"\tglobal key\n",
"\tglobal dictionary\n",
"\tglobal api\n",
"\tglobal i\n",
"\tfavorites = []\n",
"\ttry:\n",
"\t\tfor page in tweepy.Cursor(api.favorites,id=user, count=200).pages(16):\t\t\t\t#Retorna os favoritos do usuário\n",
"\t\t\tfor tweet in page:\n",
"\t\t\t\tfavorites.append(tweet)\n",
"\t\treturn (favorites)\n",
"\t\n",
"\texcept tweepy.error.RateLimitError as e:\n",
"\t\t\tprint(\"Limite de acesso à API excedido. User: \"+str(user)+\" - Autenticando novamente... \"+str(e))\n",
"\t\t\tapi = autentication(auths)\n",
"\n",
"\texcept tweepy.error.RateLimitError as e:\n",
"\t\tprint(\"Limite de acesso à API excedido. User: \"+str(user)+\" - Autenticando novamente... \"+str(e))\n",
"\t\tapi = autentication(auths)\n",
"\n",
"\texcept tweepy.error.TweepError as e:\n",
"\t\tprint (\"ERRO - Ego nº: \"+str(j)+\" - Alter (\"+str(k)+\"/\"+str(l)+\"): \"+str(user))\n",
"\t\ttry:\n",
"\t\t\tif e.reason == \"Twitter error response: status code = 404\":\t\t\t\t\t\t\t# Usuários não existentes ou não encontrados\n",
"\t\t\t\tdictionary[user] = user\t\t\t\t\t\t\t\t\t\t\t# Insere o usuário coletado na tabela em memória\n",
"\t\t\t\twith open(data_dir+str(user)+\".dat\", \"w\") as f:\t\t\t# Cria arquivo vazio\t\n",
"\t\t\t\t\tprint (\"Usuário não encontrado. User: \"+str(user)+\" - Arquivo criado com sucesso!\")\n",
"\t\t\t\ti +=1\n",
"\n",
"\t\t\telif e.reason == \"Twitter error response: status code = 401\":\t\t\t\t\t\t\t# Usuários não existentes ou não encontrados\n",
"\t\t\t\tsave_error(user,e.reason)\n",
"\t\t\t\tapi = autentication(auths)\n",
"\t\t\t\n",
"\t\t\telif e.message == 'Not authorized.': # Usuários não autorizados\n",
"\t\t\t\tdictionary[user] = user\t\t\t\t\t\t\t\t\t\t\t# Insere o usuário coletado na tabela em memória\n",
"\t\t\t\twith open(data_dir+str(user)+\".dat\", \"w\") as f:\t\t\t# Cria arquivo vazio\n",
"\t\t\t\t\tprint (\"Usuário não autorizado. User: \"+str(user)+\" - Arquivo criado com sucesso!\")\n",
"\t\t\t\ti +=1\t\t\t\t\t\t\t\t\t\t\t\n",
"\n",
"\t\t\telif e.message[0]['code'] == 32 or e.message[0]['code'] == 215 or e.message[0]['code'] == 429 or e.message[0]['code'] == 401:\n",
"\t\t\t\tsave_error(user,e.message)\t\t\t\t\n",
"\t\t\t\tkey = random.randint(key_init,key_limit)\n",
"\t\t\t\tapi = autentication(auths)\n",
"\t\t\t\t\t\n",
"\t\t\telif e.message[0]['code'] == 34 or e.message[0]['code'] == 404:\t\t\t\t\t\t\t\t\t# Usuários não existentes ou não encontrados\n",
"\t\t\t\tdictionary[user] = user\t\t\t\t\t\t\t\t\t\t\t# Insere o usuário coletado na tabela em memória\n",
"\t\t\t\twith open(data_dir+str(user)+\".dat\", \"w\") as f:\t\t\t# Cria arquivo vazio\t\n",
"\t\t\t\t\tprint (\"Usuário inexistente. User: \"+str(user)+\" - Arquivo criado com sucesso!\")\n",
"\t\t\t\ti +=1\n",
"\t\t\telse:\n",
"\t\t\t\tsave_error(user,e)\n",
"\t\t\t\tapi = autentication(auths)\n",
"\t\texcept Exception as e2:\n",
"\t\t\tsave_error(user,e2)\n",
"\t\t\tapi = autentication(auths)\t\n",
"######################################################################################################################################################################\n",
"#\n",
"# Obtem favoritos dos usuários\n",
"#\n",
"######################################################################################################################################################################\n",
"def save_favorites(j,k,l,user): # j = número do ego que esta sendo coletado - k = numero do alter que esta sendo verificado - l = tamanho da lista de amigos do ego\n",
"\tglobal i\t# numero de usuários com arquivos já coletados / Numero de arquivos no diretório\n",
"\t \n",
"\t# Dicionário - Tabela Hash contendo os usuários já coletados\n",
"\tglobal dictionary\n",
"\n",
"\t#Chama a função e recebe como retorno a lista de tweets do usuário\n",
"\tt = 0 \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Número de Tweets por usuário\n",
"\tfavorites = get_favorites(j,k,l,user)\n",
"\tif favorites:\t\n",
"\t\ttry:\n",
"\t\t\twith open(data_dir+str(user)+\".dat\", \"w+b\") as f:\n",
"\t\t\t\tfor status in favorites:\n",
"\t\t\t\t\tt+=1\n",
"\t\t\t\t\tf.write(favorites_struct.pack(status.id,status.user.id))\t\t\t\t\t\t# Grava os ids dos amigos no arquivo binário do usuário\n",
"###\n",
"#\t\t\ttweets_list = read_arq_bin(data_dir+str(user)+\".dat\") # Função para converter o binário de volta em string em formato json.\n",
"#\t\t\tprint tweets_list\n",
"####\t\t\t\t\n",
"\t\t\tdictionary[user] = user\t\t\t\t\t\t\t\t\t# Insere o usuário coletado na tabela em memória\n",
"\t\t\ti +=1\n",
"\t\t\tprint (\"Ego nº: \"+str(j)+\" - Alter (\"+str(k)+\"/\"+str(l)+\"): \"+str(user)+\" coletados com sucesso. \"+str(t)+\" tweets. Total coletados: \"+str(i))\n",
"\t\n",
"\t\texcept Exception as e:\t\n",
"\t\t\tif e.message:\t\t\n",
"\t\t\t\tsave_error(user,e.message)\n",
"\t\t\telse:\n",
"\t\t\t\tsave_error(user,str(e))\n",
"\t\t\tif os.path.exists(data_dir+str(user)+\".dat\"):\n",
"\t\t\t\tos.remove(data_dir+str(user)+\".dat\")\n",
"\t\t\t\tprint (\"Arquivo removido co sucesso...\")\n",
"\n",
"\n",
"######################################################################################################################################################################\n",
"######################################################################################################################################################################\n",
"#\n",
"# Método principal do programa.\n",
"# Realiza teste e coleta dos favoritos do user especificado no arquivo. \n",
"#\n",
"######################################################################################################################################################################\n",
"######################################################################################################################################################################\n",
"\n",
"def main():\n",
"\tglobal i \t\t\t\t\t\t\t\t\t\t\t\t\t# numero de usuários com arquivos já coletados / Numero de arquivos no diretório\n",
"\tj = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Exibe o número ordinal do ego que está sendo usado para a coleta dos favoritos\n",
"\t\n",
"\tfor file in os.listdir(egos_favorites_dir):\t\t\t\t\t# Verifica a lista de egos coletados e para cada um, busca os autores de retweets dos alters listados no arquivo do ego.\n",
"\t\tj+=1\n",
"\t\tauthors_list = read_arq_bin(egos_favorites_dir+file) # Função para converter o binário de volta em string em formato json.\n",
"\t\tl = len(authors_list)\t\t\t\t\t\t\t\t\t\t# Exibe o tamanho/quantidade de autores de retweets do ego\n",
"\t\tk = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Exibe o número ordinal do alter que está sendo coletado a lista de amigos\n",
"\t\tfor authors in authors_list:\n",
"\t\t\tk+=1\n",
"\t\t\tif not dictionary.has_key(authors):\n",
"\t\t\t\tsave_favorites(j,k,l,authors)\t\t\t\t\t\t\t#Inicia função de busca\n",
"#\t\tprint (\"Ego: \"+str(j)+\" - \"+str(len(authors_list))+\" alters.\")\n",
"\tprint\n",
"\tprint(\"######################################################################\")\n",
"\tprint(\"Coleta finalizada!\")\n",
"\tprint(\"######################################################################\\n\")\n",
"\n",
"######################################################################################################################################################################\n",
"#\n",
"# INÍCIO DO PROGRAMA\n",
"#\n",
"######################################################################################################################################################################\n",
"\n",
"################################### DEFINIR SE É TESTE OU NÃO!!! ### ['auths_ok'] OU ['auths_test'] ################\t\t\t\t\n",
"oauth_keys = multi_oauth.keys()\n",
"auths = oauth_keys['auths_ok']\n",
"\t\n",
"################################### CONFIGURAR AS LINHAS A SEGUIR ####################################################\n",
"######################################################################################################################\n",
"qtde_egos = 'full' \t\t# 50, 100, 500 ou full\n",
"######################################################################################################################\n",
"######################################################################################################################\n",
"key_init = 0\t\t\t\t\t################################################################ Essas duas linhas atribuem as chaves para cada script\n",
"key_limit = len(auths)\t\t################################################################ Usa todas as chaves (tamanho da lista de chaves)\n",
"key = random.randint(key_init,key_limit) ################################################## Inicia o script a partir de uma chave aleatória do conjunto de chaves\n",
"egos_favorites_dir = \"/home/amaury/dataset/n3/egos/bin/\"\t\t\t\t\t\t\t\t\t\t\t\t# Arquivo contendo a lista dos usuários ego já coletados\n",
"data_dir = \"/home/amaury/coleta/n3/alters/\"+str(qtde_egos)+\"/bin/\"\t\t\t\t\t\t\t\t# Diretório para armazenamento dos arquivos\n",
"error_dir = \"/home/amaury/coleta/n3/alters/\"+str(qtde_egos)+\"/error/\" \t\t\t\t\t\t\t# Diretório para armazenamento dos arquivos de erro\n",
"formato = 'll'\t\t\t\t################################################################### Long para o código ('l') e depois o array de chars de X posições:\t\n",
"favorites_struct = struct.Struct(formato) ################################################# Inicializa o objeto do tipo struct para poder armazenar o formato específico no arquivo binário\n",
"wait = 5\n",
"dictionary = {}\t\t\t\t#################################################### Tabela {chave:valor} para facilitar a consulta dos usuários já coletados\n",
"######################################################################################################################\n",
"######################################################################################################################\n",
"######################################################################################################################\n",
"#Cria os diretórios para armazenamento dos arquivos\n",
"if not os.path.exists(data_dir):\n",
"\tos.makedirs(data_dir)\n",
"if not os.path.exists(error_dir):\n",
"\tos.makedirs(error_dir)\n",
"\n",
"###### Iniciando dicionário - tabela hash a partir dos arquivos já criados.\n",
"print\n",
"print(\"######################################################################\")\n",
"print (\"Criando tabela hash...\")\n",
"i = 0\t#Conta quantos usuários já foram coletados (todos arquivos no diretório)\n",
"for file in os.listdir(data_dir):\n",
"\tuser_id = file.split(\".dat\")\n",
"\tuser_id = long(user_id[0])\n",
"\tdictionary[user_id] = user_id\n",
"\ti+=1\n",
"print (\"Tabela hash criada com sucesso...\") \n",
"print(\"######################################################################\\n\")\n",
"#Autenticação\n",
"api = autentication(auths)\n",
"\n",
"\t\n",
"#Executa o método main\n",
"if __name__ == \"__main__\": main()"
] | [
0,
0.010309278350515464,
0.6666666666666666,
0,
0.023809523809523808,
0,
0.017241379310344827,
0,
0,
0,
0,
0.005988023952095809,
0.02,
0.2222222222222222,
0.014925373134328358,
0.0196078431372549,
0.02040816326530612,
0,
0.2857142857142857,
0.017699115044247787,
0,
0.025210084033613446,
0,
0.013513513513513514,
0,
0.012987012987012988,
0,
0.25,
0.005988023952095809,
0,
0.005988023952095809,
0,
0,
0,
0.005988023952095809,
0,
0.038461538461538464,
0.08333333333333333,
0.1,
0.041666666666666664,
0.058823529411764705,
0.14285714285714285,
0.024691358024691357,
0.02702702702702703,
0.024096385542168676,
0.05555555555555555,
0.029411764705882353,
0.05555555555555555,
0,
0.005988023952095809,
0,
0,
0,
0.005988023952095809,
0.024390243902439025,
0.03571428571428571,
0,
0,
0,
0.015625,
0,
0,
0.010309278350515464,
0,
0.010309278350515464,
0.041666666666666664,
0.1,
0.14285714285714285,
0.047619047619047616,
0.08333333333333333,
0.043478260869565216,
0.03571428571428571,
0.023809523809523808,
0.02040816326530612,
0.038461538461538464,
0.047619047619047616,
0,
0.005988023952095809,
0,
0.02631578947368421,
0,
0.005988023952095809,
0.06896551724137931,
0.014814814814814815,
0.2,
0.01652892561983471,
0.11428571428571428,
0.023809523809523808,
0.07692307692307693,
1,
0.005988023952095809,
0,
0.013157894736842105,
0,
0.005988023952095809,
0.09375,
0.08333333333333333,
0.05263157894736842,
0.08333333333333333,
0.1,
0.0625,
0.16666666666666666,
0.03669724770642202,
0.045454545454545456,
0.03571428571428571,
0.047619047619047616,
1,
0.023809523809523808,
0.0297029702970297,
0.03333333333333333,
0,
0.023809523809523808,
0.02,
0.034482758620689655,
0,
0.02631578947368421,
0.036585365853658534,
0.14285714285714285,
0.017543859649122806,
0.022988505747126436,
0.02631578947368421,
0.033707865168539325,
0.2,
0,
0.017241379310344827,
0.06666666666666667,
0.03225806451612903,
0.5,
0.029850746268656716,
0.022988505747126436,
0.013333333333333334,
0.033707865168539325,
0.14285714285714285,
0,
0.015503875968992248,
0.08571428571428572,
0.044444444444444446,
0.03225806451612903,
0.3333333333333333,
0.016666666666666666,
0.022988505747126436,
0.02631578947368421,
0.03488372093023256,
0.2,
0.1111111111111111,
0.08695652173913043,
0.03225806451612903,
0.038461538461538464,
0.08695652173913043,
0.06451612903225806,
0.005988023952095809,
0,
0,
0,
0.005988023952095809,
0.03636363636363636,
0.03296703296703297,
1,
0.016129032258064516,
0.05263157894736842,
0,
0.029411764705882353,
0.018518518518518517,
0.10256410256410256,
0.125,
0.14285714285714285,
0.018867924528301886,
0.034482758620689655,
0.2,
0.024390243902439025,
0,
0.015625,
0.045454545454545456,
0.2222222222222222,
0.023809523809523808,
0.2222222222222222,
0.02054794520547945,
1,
0.07692307692307693,
0.10526315789473684,
0.06451612903225806,
0.1111111111111111,
0.07142857142857142,
0.02040816326530612,
0.024390243902439025,
0.044444444444444446,
0,
0,
0.005988023952095809,
0.005988023952095809,
0,
0,
0.0136986301369863,
0,
0.005988023952095809,
0.005988023952095809,
0,
0,
0.019230769230769232,
0.0196078431372549,
1,
0.011764705882352941,
0.2857142857142857,
0.024,
0.03260869565217391,
0.030612244897959183,
0.03225806451612903,
0.25,
0.05128205128205128,
0.078125,
0.015151515151515152,
0.14285714285714285,
0.024691358024691357,
0.034482758620689655,
0.024096385542168676,
0,
0.005988023952095809,
0,
0,
0,
0.005988023952095809,
0,
0.02459016393442623,
0.03125,
0,
1,
0.01680672268907563,
0.008403361344537815,
0,
0.008403361344537815,
0.008403361344537815,
0.014705882352941176,
0.014492753623188406,
0.024691358024691357,
0.008,
0.00847457627118644,
0.007751937984496124,
0.0196078431372549,
0.015957446808510637,
0,
0.013793103448275862,
0.008403361344537815,
0.008403361344537815,
0.008403361344537815,
0.019230769230769232,
0,
0.043478260869565216,
0,
0.041666666666666664,
0,
0.013157894736842105,
0,
0,
0.030303030303030304,
0.02531645569620253,
0,
0.03333333333333333,
0.03571428571428571,
0.03225806451612903,
0.3333333333333333,
0.044444444444444446,
0.012195121951219513,
0.07142857142857142,
0,
0,
1,
0.043478260869565216,
0.06060606060606061
] | 268 | 0.06843 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.network import base
from tempest.common import identity
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class RoutersAdminTest(base.BaseAdminNetworkTest):
# NOTE(salv-orlando): This class inherits from BaseAdminNetworkTest
# as some router operations, such as enabling or disabling SNAT
# require admin credentials by default
def _cleanup_router(self, router):
self.delete_router(router)
def _create_router(self, name=None, admin_state_up=False,
external_network_id=None, enable_snat=None):
# associate a cleanup with created routers to avoid quota limits
router = self.create_router(name, admin_state_up,
external_network_id, enable_snat)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self._cleanup_router, router)
return router
@classmethod
def skip_checks(cls):
super(RoutersAdminTest, cls).skip_checks()
if not utils.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
@decorators.idempotent_id('e54dd3a3-4352-4921-b09d-44369ae17397')
def test_create_router_setting_project_id(self):
# Test creating router from admin user setting project_id.
project = data_utils.rand_name('test_tenant_')
description = data_utils.rand_name('desc_')
project = identity.identity_utils(self.os_admin).create_project(
name=project, description=description)
project_id = project['id']
self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
project_id)
name = data_utils.rand_name('router-')
create_body = self.admin_routers_client.create_router(
name=name, tenant_id=project_id)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.admin_routers_client.delete_router,
create_body['router']['id'])
self.assertEqual(project_id, create_body['router']['tenant_id'])
@decorators.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_create_router_with_default_snat_value(self):
# Create a router with default snat rule
router = self._create_router(
external_network_id=CONF.network.public_network_id)
self._verify_router_gateway(
router['id'], {'network_id': CONF.network.public_network_id,
'enable_snat': True})
@decorators.idempotent_id('ea74068d-09e9-4fd7-8995-9b6a1ace920f')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_create_router_with_snat_explicit(self):
name = data_utils.rand_name('snat-router')
# Create a router enabling snat attributes
enable_snat_states = [False, True]
for enable_snat in enable_snat_states:
external_gateway_info = {
'network_id': CONF.network.public_network_id,
'enable_snat': enable_snat}
create_body = self.admin_routers_client.create_router(
name=name, external_gateway_info=external_gateway_info)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.admin_routers_client.delete_router,
create_body['router']['id'])
# Verify snat attributes after router creation
self._verify_router_gateway(create_body['router']['id'],
exp_ext_gw_info=external_gateway_info)
def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
show_body = self.admin_routers_client.show_router(router_id)
actual_ext_gw_info = show_body['router']['external_gateway_info']
if exp_ext_gw_info is None:
self.assertIsNone(actual_ext_gw_info)
return
# Verify only keys passed in exp_ext_gw_info
for k, v in exp_ext_gw_info.items():
self.assertEqual(v, actual_ext_gw_info[k])
def _verify_gateway_port(self, router_id):
list_body = self.admin_ports_client.list_ports(
network_id=CONF.network.public_network_id,
device_id=router_id,
device_owner="network:router_gateway")
self.assertEqual(len(list_body['ports']), 1)
gw_port = list_body['ports'][0]
fixed_ips = gw_port['fixed_ips']
self.assertNotEmpty(fixed_ips)
# Assert that all of the IPs from the router gateway port
# are allocated from a valid public subnet.
public_net_body = self.admin_networks_client.show_network(
CONF.network.public_network_id)
public_subnet_ids = public_net_body['network']['subnets']
for fixed_ip in fixed_ips:
subnet_id = fixed_ip['subnet_id']
self.assertIn(subnet_id, public_subnet_ids)
@decorators.idempotent_id('6cc285d8-46bf-4f36-9b1a-783e3008ba79')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_set_gateway(self):
router = self._create_router()
self.routers_client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id})
# Verify operation - router
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id})
self._verify_gateway_port(router['id'])
@decorators.idempotent_id('b386c111-3b21-466d-880c-5e72b01e1a33')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_set_gateway_with_snat_explicit(self):
router = self._create_router()
self.admin_routers_client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': True})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': True})
self._verify_gateway_port(router['id'])
@decorators.idempotent_id('96536bc7-8262-4fb2-9967-5c46940fa279')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_set_gateway_without_snat(self):
router = self._create_router()
self.admin_routers_client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_gateway_port(router['id'])
@decorators.idempotent_id('ad81b7ee-4f81-407b-a19c-17e623f763e8')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_unset_gateway(self):
router = self._create_router(
external_network_id=CONF.network.public_network_id)
self.routers_client.update_router(router['id'],
external_gateway_info={})
self._verify_router_gateway(router['id'])
# No gateway port expected
list_body = self.admin_ports_client.list_ports(
network_id=CONF.network.public_network_id,
device_id=router['id'])
self.assertFalse(list_body['ports'])
@decorators.idempotent_id('f2faf994-97f4-410b-a831-9bc977b64374')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_reset_gateway_without_snat(self):
router = self._create_router(
external_network_id=CONF.network.public_network_id)
self.admin_routers_client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_gateway_port(router['id'])
class RoutersIpV6AdminTest(RoutersAdminTest):
_ip_version = 6
| [
"# Copyright 2013 OpenStack Foundation\n",
"# All Rights Reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"import testtools\n",
"\n",
"from tempest.api.network import base\n",
"from tempest.common import identity\n",
"from tempest.common import utils\n",
"from tempest import config\n",
"from tempest.lib.common.utils import data_utils\n",
"from tempest.lib.common.utils import test_utils\n",
"from tempest.lib import decorators\n",
"\n",
"CONF = config.CONF\n",
"\n",
"\n",
"class RoutersAdminTest(base.BaseAdminNetworkTest):\n",
" # NOTE(salv-orlando): This class inherits from BaseAdminNetworkTest\n",
" # as some router operations, such as enabling or disabling SNAT\n",
" # require admin credentials by default\n",
"\n",
" def _cleanup_router(self, router):\n",
" self.delete_router(router)\n",
"\n",
" def _create_router(self, name=None, admin_state_up=False,\n",
" external_network_id=None, enable_snat=None):\n",
" # associate a cleanup with created routers to avoid quota limits\n",
" router = self.create_router(name, admin_state_up,\n",
" external_network_id, enable_snat)\n",
" self.addCleanup(test_utils.call_and_ignore_notfound_exc,\n",
" self._cleanup_router, router)\n",
" return router\n",
"\n",
" @classmethod\n",
" def skip_checks(cls):\n",
" super(RoutersAdminTest, cls).skip_checks()\n",
" if not utils.is_extension_enabled('router', 'network'):\n",
" msg = \"router extension not enabled.\"\n",
" raise cls.skipException(msg)\n",
"\n",
" @decorators.idempotent_id('e54dd3a3-4352-4921-b09d-44369ae17397')\n",
" def test_create_router_setting_project_id(self):\n",
" # Test creating router from admin user setting project_id.\n",
" project = data_utils.rand_name('test_tenant_')\n",
" description = data_utils.rand_name('desc_')\n",
" project = identity.identity_utils(self.os_admin).create_project(\n",
" name=project, description=description)\n",
" project_id = project['id']\n",
" self.addCleanup(identity.identity_utils(self.os_admin).delete_project,\n",
" project_id)\n",
"\n",
" name = data_utils.rand_name('router-')\n",
" create_body = self.admin_routers_client.create_router(\n",
" name=name, tenant_id=project_id)\n",
" self.addCleanup(test_utils.call_and_ignore_notfound_exc,\n",
" self.admin_routers_client.delete_router,\n",
" create_body['router']['id'])\n",
" self.assertEqual(project_id, create_body['router']['tenant_id'])\n",
"\n",
" @decorators.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8')\n",
" @utils.requires_ext(extension='ext-gw-mode', service='network')\n",
" @testtools.skipUnless(CONF.network.public_network_id,\n",
" 'The public_network_id option must be specified.')\n",
" def test_create_router_with_default_snat_value(self):\n",
" # Create a router with default snat rule\n",
" router = self._create_router(\n",
" external_network_id=CONF.network.public_network_id)\n",
" self._verify_router_gateway(\n",
" router['id'], {'network_id': CONF.network.public_network_id,\n",
" 'enable_snat': True})\n",
"\n",
" @decorators.idempotent_id('ea74068d-09e9-4fd7-8995-9b6a1ace920f')\n",
" @utils.requires_ext(extension='ext-gw-mode', service='network')\n",
" @testtools.skipUnless(CONF.network.public_network_id,\n",
" 'The public_network_id option must be specified.')\n",
" def test_create_router_with_snat_explicit(self):\n",
" name = data_utils.rand_name('snat-router')\n",
" # Create a router enabling snat attributes\n",
" enable_snat_states = [False, True]\n",
" for enable_snat in enable_snat_states:\n",
" external_gateway_info = {\n",
" 'network_id': CONF.network.public_network_id,\n",
" 'enable_snat': enable_snat}\n",
" create_body = self.admin_routers_client.create_router(\n",
" name=name, external_gateway_info=external_gateway_info)\n",
" self.addCleanup(test_utils.call_and_ignore_notfound_exc,\n",
" self.admin_routers_client.delete_router,\n",
" create_body['router']['id'])\n",
" # Verify snat attributes after router creation\n",
" self._verify_router_gateway(create_body['router']['id'],\n",
" exp_ext_gw_info=external_gateway_info)\n",
"\n",
" def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):\n",
" show_body = self.admin_routers_client.show_router(router_id)\n",
" actual_ext_gw_info = show_body['router']['external_gateway_info']\n",
" if exp_ext_gw_info is None:\n",
" self.assertIsNone(actual_ext_gw_info)\n",
" return\n",
" # Verify only keys passed in exp_ext_gw_info\n",
" for k, v in exp_ext_gw_info.items():\n",
" self.assertEqual(v, actual_ext_gw_info[k])\n",
"\n",
" def _verify_gateway_port(self, router_id):\n",
" list_body = self.admin_ports_client.list_ports(\n",
" network_id=CONF.network.public_network_id,\n",
" device_id=router_id,\n",
" device_owner=\"network:router_gateway\")\n",
" self.assertEqual(len(list_body['ports']), 1)\n",
" gw_port = list_body['ports'][0]\n",
" fixed_ips = gw_port['fixed_ips']\n",
" self.assertNotEmpty(fixed_ips)\n",
" # Assert that all of the IPs from the router gateway port\n",
" # are allocated from a valid public subnet.\n",
" public_net_body = self.admin_networks_client.show_network(\n",
" CONF.network.public_network_id)\n",
" public_subnet_ids = public_net_body['network']['subnets']\n",
" for fixed_ip in fixed_ips:\n",
" subnet_id = fixed_ip['subnet_id']\n",
" self.assertIn(subnet_id, public_subnet_ids)\n",
"\n",
" @decorators.idempotent_id('6cc285d8-46bf-4f36-9b1a-783e3008ba79')\n",
" @testtools.skipUnless(CONF.network.public_network_id,\n",
" 'The public_network_id option must be specified.')\n",
" def test_update_router_set_gateway(self):\n",
" router = self._create_router()\n",
" self.routers_client.update_router(\n",
" router['id'],\n",
" external_gateway_info={\n",
" 'network_id': CONF.network.public_network_id})\n",
" # Verify operation - router\n",
" self._verify_router_gateway(\n",
" router['id'],\n",
" {'network_id': CONF.network.public_network_id})\n",
" self._verify_gateway_port(router['id'])\n",
"\n",
" @decorators.idempotent_id('b386c111-3b21-466d-880c-5e72b01e1a33')\n",
" @utils.requires_ext(extension='ext-gw-mode', service='network')\n",
" @testtools.skipUnless(CONF.network.public_network_id,\n",
" 'The public_network_id option must be specified.')\n",
" def test_update_router_set_gateway_with_snat_explicit(self):\n",
" router = self._create_router()\n",
" self.admin_routers_client.update_router(\n",
" router['id'],\n",
" external_gateway_info={\n",
" 'network_id': CONF.network.public_network_id,\n",
" 'enable_snat': True})\n",
" self._verify_router_gateway(\n",
" router['id'],\n",
" {'network_id': CONF.network.public_network_id,\n",
" 'enable_snat': True})\n",
" self._verify_gateway_port(router['id'])\n",
"\n",
" @decorators.idempotent_id('96536bc7-8262-4fb2-9967-5c46940fa279')\n",
" @utils.requires_ext(extension='ext-gw-mode', service='network')\n",
" @testtools.skipUnless(CONF.network.public_network_id,\n",
" 'The public_network_id option must be specified.')\n",
" def test_update_router_set_gateway_without_snat(self):\n",
" router = self._create_router()\n",
" self.admin_routers_client.update_router(\n",
" router['id'],\n",
" external_gateway_info={\n",
" 'network_id': CONF.network.public_network_id,\n",
" 'enable_snat': False})\n",
" self._verify_router_gateway(\n",
" router['id'],\n",
" {'network_id': CONF.network.public_network_id,\n",
" 'enable_snat': False})\n",
" self._verify_gateway_port(router['id'])\n",
"\n",
" @decorators.idempotent_id('ad81b7ee-4f81-407b-a19c-17e623f763e8')\n",
" @testtools.skipUnless(CONF.network.public_network_id,\n",
" 'The public_network_id option must be specified.')\n",
" def test_update_router_unset_gateway(self):\n",
" router = self._create_router(\n",
" external_network_id=CONF.network.public_network_id)\n",
" self.routers_client.update_router(router['id'],\n",
" external_gateway_info={})\n",
" self._verify_router_gateway(router['id'])\n",
" # No gateway port expected\n",
" list_body = self.admin_ports_client.list_ports(\n",
" network_id=CONF.network.public_network_id,\n",
" device_id=router['id'])\n",
" self.assertFalse(list_body['ports'])\n",
"\n",
" @decorators.idempotent_id('f2faf994-97f4-410b-a831-9bc977b64374')\n",
" @utils.requires_ext(extension='ext-gw-mode', service='network')\n",
" @testtools.skipUnless(CONF.network.public_network_id,\n",
" 'The public_network_id option must be specified.')\n",
" def test_update_router_reset_gateway_without_snat(self):\n",
" router = self._create_router(\n",
" external_network_id=CONF.network.public_network_id)\n",
" self.admin_routers_client.update_router(\n",
" router['id'],\n",
" external_gateway_info={\n",
" 'network_id': CONF.network.public_network_id,\n",
" 'enable_snat': False})\n",
" self._verify_router_gateway(\n",
" router['id'],\n",
" {'network_id': CONF.network.public_network_id,\n",
" 'enable_snat': False})\n",
" self._verify_gateway_port(router['id'])\n",
"\n",
"\n",
"class RoutersIpV6AdminTest(RoutersAdminTest):\n",
" _ip_version = 6\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 217 | 0 |
"""Unit tests for the doorstop.core.types module."""
import unittest
from doorstop.common import DoorstopError
from doorstop.core.types import Prefix, UID, Text, Level, Stamp, Reference
class TestPrefix(unittest.TestCase):
"""Unit tests for the Prefix class.""" # pylint: disable=W0212
def setUp(self):
self.prefix1 = Prefix('REQ')
self.prefix2 = Prefix('TST (@/tst)')
def test_init_empty(self):
"""Verify prefixes are parsed correctly (empty)."""
self.assertEqual(Prefix(''), Prefix())
self.assertEqual(Prefix(''), Prefix(None))
def test_init_instance(self):
"""Verify prefixes are parsed correctly (instance)."""
self.assertIs(self.prefix1, Prefix(self.prefix1))
self.assertEqual(Prefix(''), Prefix(None))
self.assertEqual(Prefix(''), Prefix(''))
    def test_init_reserved(self):
"""Verify an exception is raised for a reserved word."""
self.assertRaises(DoorstopError, Prefix, 'ALL')
def test_repr(self):
"""Verify prefixes can be represented."""
self.assertEqual("Prefix('REQ')", repr(self.prefix1))
self.assertEqual("Prefix('TST')", repr(self.prefix2))
def test_str(self):
"""Verify prefixes can be converted to strings."""
self.assertEqual('REQ', str(self.prefix1))
self.assertEqual('TST', str(self.prefix2))
def test_eq(self):
"""Verify prefixes can be equated."""
self.assertEqual(Prefix('REQ'), self.prefix1)
self.assertNotEqual(self.prefix1, self.prefix2)
self.assertEqual(Prefix('req'), self.prefix1)
self.assertEqual('Req', self.prefix1)
self.assertNotEqual(None, self.prefix1)
self.assertNotEqual('all', self.prefix1)
def test_sort(self):
"""Verify prefixes can be sorted."""
prefixes = [Prefix('a'), Prefix('B'), Prefix('c')]
self.assertListEqual(prefixes, sorted(prefixes))
def test_short(self):
"""Verify the short representation of prefixes is correct."""
self.assertEqual('req', self.prefix1.short)
self.assertEqual('tst', self.prefix2.short)
class TestUID(unittest.TestCase):
"""Unit tests for the UID class.""" # pylint: disable=W0212
def setUp(self):
self.uid1 = UID('REQ001')
self.uid2 = UID('TST-02')
self.uid3 = UID('SYS', '-', 3, 5)
self.uid4 = UID('REQ001', stamp='abc123')
def test_init_str(self):
"""Verify UIDs are parsed correctly (string)."""
uid = UID('REQ')
self.assertRaises(DoorstopError, getattr, uid, 'prefix')
uid = UID('REQ-?')
self.assertRaises(DoorstopError, getattr, uid, 'number')
def test_init_dict(self):
"""Verify UIDs are parsed correctly (dictionary)."""
uid = UID({'REQ001': 'abc123'})
self.assertEqual('REQ', uid.prefix)
self.assertEqual(1, uid.number)
self.assertEqual('abc123', uid.stamp)
def test_init_values(self):
"""Verify UIDs are parsed correctly (values)."""
self.assertRaises(TypeError, UID, 'REQ', '-')
self.assertRaises(TypeError, UID, 'REQ', '-', 42)
self.assertRaises(TypeError, UID, 'REQ', '-', 42, 3, 'extra')
def test_init_empty(self):
"""Verify UIDs are parsed correctly (empty)."""
self.assertEqual(UID(''), UID())
self.assertEqual(UID(''), UID(None))
def test_init_instance(self):
"""Verify UIDs are parsed correctly (instance)."""
self.assertIs(self.uid1, UID(self.uid1))
self.assertIs(self.uid4, UID(self.uid4))
def test_repr(self):
"""Verify UIDs can be represented."""
self.assertEqual("UID('REQ001')", repr(self.uid1))
self.assertEqual("UID('TST-02')", repr(self.uid2))
self.assertEqual("UID('SYS-00003')", repr(self.uid3))
self.assertEqual("UID('REQ001', stamp='abc123')", repr(self.uid4))
def test_str(self):
"""Verify UIDs can be converted to strings."""
self.assertEqual('REQ001', str(self.uid1))
self.assertEqual('TST-02', str(self.uid2))
self.assertEqual('SYS-00003', str(self.uid3))
def test_eq(self):
"""Verify UIDs can be equated."""
self.assertEqual(UID('REQ.001'), UID('req', '', 1, 3))
self.assertEqual(UID('REQ1'), UID('REQ', '', 1, 3))
self.assertNotEqual(UID('REQ.2'), UID('REQ', '-', 1, 3))
self.assertEqual(UID('REQ1'), UID('REQ001 (@/req1.yml)'))
self.assertEqual('req1', UID('REQ001'))
self.assertNotEqual(None, UID('REQ001'))
self.assertEqual(self.uid1, self.uid4)
def test_sort(self):
"""Verify UIDs can be sorted."""
uids = [UID('a'), UID('a1'), UID('a2'), UID('b')]
self.assertListEqual(uids, sorted(uids))
def test_prefix(self):
"""Verify UIDs have prefixes."""
self.assertEqual('REQ', self.uid1.prefix)
self.assertEqual('TST', self.uid2.prefix)
self.assertEqual('SYS', self.uid3.prefix)
def test_number(self):
"""Verify UIDs have numbers."""
self.assertEqual(1, self.uid1.number)
self.assertEqual(2, self.uid2.number)
self.assertEqual(3, self.uid3.number)
def test_short(self):
"""Verify the short representation of IDs is correct."""
self.assertEqual('req1', self.uid1.short)
self.assertEqual('tst2', self.uid2.short)
self.assertEqual('sys3', self.uid3.short)
def test_string(self):
"""Verify UIDs can be converted to string including stamps."""
self.assertEqual("REQ001", self.uid1.string)
self.assertEqual("REQ001:abc123", self.uid4.string)
def test_stamp(self):
"""Verify stamps are stored correctly."""
self.assertEqual('abc123', self.uid4.stamp)
self.assertEqual('abc123', UID(self.uid4).stamp)
self.assertEqual('def456', UID(self.uid4, stamp='def456').stamp)
self.assertEqual(True, UID({'REQ001': 1}).stamp)
self.assertEqual(True, UID("REQ001:1").stamp)
class TestText(unittest.TestCase):
"""Unit tests for the Text class.""" # pylint: disable=W0212
def setUp(self):
self.text = Text("Hello, \nworld! ")
def test_init(self):
"""Verify Text is parsed correctly."""
self.assertEqual(Text(""), Text())
self.assertEqual(Text(""), Text(None))
self.assertEqual(Text(""), Text(""))
def test_repr(self):
"""Verify text can be represented."""
self.assertEqual("'Hello, world!'", repr(self.text))
def test_str(self):
"""Verify text can be converted to strings."""
self.assertEqual("Hello, world!", str(self.text))
def test_eq(self):
"""Verify text can be equated."""
self.assertEqual(Text("Hello, world!"), self.text)
def test_yaml(self):
"""Verify levels can be converted to their YAML representation."""
self.assertEqual("Hello, world!\n", self.text.yaml)
class TestLevel(unittest.TestCase):
"""Unit tests for the Level class.""" # pylint: disable=W0212
def setUp(self):
self.level_1 = Level('1')
self.level_1_2 = Level('1.2')
self.level_1_2_heading = Level('1.2.0')
self.level_1_2_3 = Level('1.2.3')
def test_init(self):
"""Verify levels can be parsed."""
self.assertEqual((1, 0), Level((1, 0)).value)
self.assertEqual((1,), Level((1)).value)
self.assertEqual((1, 0), Level(Level('1.0')).value)
self.assertEqual((1, 0), Level(1, heading=True).value)
self.assertEqual((1,), Level((1, 0), heading=False).value)
self.assertEqual((1,), Level())
self.assertEqual((1,), Level(None))
self.assertEqual((1,), Level(()).value)
self.assertEqual((1,), Level(0).value)
self.assertEqual((1,), Level('').value)
self.assertEqual((0,), Level((0,)).value)
self.assertEqual((0,), Level('0').value)
self.assertEqual((0,), Level('0.0').value)
def test_repr(self):
"""Verify levels can be represented."""
self.assertEqual("Level('1')", repr(self.level_1))
self.assertEqual("Level('1.2')", repr(self.level_1_2))
self.assertEqual("Level('1.2', heading=True)",
repr(self.level_1_2_heading))
self.assertEqual("Level('1.2.3')", repr(self.level_1_2_3))
def test_str(self):
"""Verify levels can be converted to strings."""
self.assertEqual('1', str(self.level_1))
self.assertEqual('1.2', str(self.level_1_2))
self.assertEqual('1.2.0', str(self.level_1_2_heading))
self.assertEqual('1.2.3', str(self.level_1_2_3))
def test_len(self):
"""Verify a level length is equal to number of non-heading parts."""
self.assertEqual(1, len(self.level_1))
self.assertEqual(2, len(self.level_1_2))
self.assertEqual(2, len(self.level_1_2_heading))
self.assertEqual(3, len(self.level_1_2_3))
def test_eq(self):
"""Verify levels can be equated."""
self.assertNotEqual(self.level_1, self.level_1_2)
self.assertEqual(self.level_1_2, Level([1, 2]))
self.assertEqual(self.level_1_2, (1, 2))
self.assertEqual(self.level_1_2, self.level_1_2_heading)
def test_eq_other(self):
"""Verify levels can be equated with non-levels."""
self.assertNotEqual(self.level_1, None)
self.assertEqual((1, 2, 0), self.level_1_2_heading)
self.assertEqual((1, 2), self.level_1_2_heading)
def test_compare(self):
"""Verify levels can be compared."""
self.assertLess(self.level_1, self.level_1_2)
self.assertLessEqual(self.level_1, self.level_1)
self.assertLessEqual(self.level_1, self.level_1_2)
self.assertLess(self.level_1_2, [1, 3])
self.assertGreater(self.level_1_2_3, self.level_1_2)
self.assertGreaterEqual(self.level_1_2_3, self.level_1_2)
self.assertGreaterEqual(self.level_1_2_3, self.level_1_2_3)
def test_hash(self):
"""Verify level's can be hashed."""
levels = {Level('1.2'): 1, Level('1.2.3'): 2}
self.assertIn(self.level_1_2, levels)
self.assertNotIn(self.level_1_2_heading, levels)
def test_add(self):
"""Verify levels can be incremented."""
level = self.level_1_2
level += 1
self.assertEqual(Level('1.3'), level)
self.assertEqual(Level('1.5'), level + 2)
def test_add_heading(self):
"""Verify (heading) levels can be incremented."""
level = self.level_1_2_heading
level += 2
self.assertEqual(Level('1.4.0'), level)
def test_sub(self):
"""Verify levels can be decremented."""
level = self.level_1_2_3
level -= 1
self.assertEqual(Level('1.2.2'), level)
self.assertEqual(Level('1.2.1'), level - 1)
def test_sub_heading(self):
"""Verify (heading) levels can be decremented."""
level = self.level_1_2_heading
level -= 1
self.assertEqual(Level('1.1.0'), level)
def test_sub_zero(self):
"""Verify levels cannot be decremented to zero."""
level = self.level_1_2
level -= 2
self.assertEqual(Level('1.1'), level)
def test_rshift(self):
"""Verify levels can be indented."""
level = self.level_1_2
level >>= 1
self.assertEqual(Level('1.2.1'), level)
self.assertEqual(Level('1.2.1.1'), level >> 1)
def test_rshift_heading(self):
"""Verify (heading) levels can be indented."""
level = self.level_1_2_heading
level >>= 2
self.assertEqual(Level('1.2.1.1.0'), level)
def test_rshift_negative(self):
"""Verify levels can be indented negatively."""
level = self.level_1_2_3
level >>= -1
self.assertEqual(Level('1.2'), level)
self.assertEqual(Level('1'), level >> -1)
def test_lshift(self):
"""Verify levels can be dedented."""
level = self.level_1_2_3
level <<= 1
self.assertEqual(Level('1.2'), level)
self.assertEqual(Level('1'), level << 1)
def test_lshift_heading(self):
"""Verify (heading) levels can be dedented."""
level = self.level_1_2_heading
level <<= 1
self.assertEqual(Level('1.0'), level)
def test_lshift_negative(self):
"""Verify levels can be dedented negatively."""
level = self.level_1_2_3
level <<= -1
self.assertEqual(Level('1.2.3.1'), level)
self.assertEqual(Level('1.2.3.1.1'), level << -1)
def test_lshift_empty(self):
"""Verify levels can be dedented."""
level = self.level_1_2_3
level <<= 4
self.assertEqual(Level('1'), level)
def test_lshift_zero(self):
"""Verify detenting levels by zero has no effect.."""
level = self.level_1_2_3
level <<= 0
self.assertEqual(Level('1.2.3'), level)
def test_value(self):
"""Verify levels can be converted to their values."""
self.assertEqual((1,), self.level_1.value)
self.assertEqual((1, 2), self.level_1_2.value)
self.assertEqual((1, 2, 0), self.level_1_2_heading.value)
self.assertEqual((1, 2, 3), self.level_1_2_3.value)
def test_yaml(self):
"""Verify levels can be converted to their YAML representation."""
self.assertEqual(1, self.level_1.yaml)
self.assertEqual(1.2, self.level_1_2.yaml)
self.assertEqual('1.2.0', self.level_1_2_heading.yaml)
self.assertEqual('1.2.3', self.level_1_2_3.yaml)
def test_copy(self):
"""Verify levels can be copied."""
level = self.level_1_2.copy()
self.assertEqual(level, self.level_1_2)
level += 1
self.assertNotEqual(level, self.level_1_2)
class TestStamp(unittest.TestCase):
"""Unit tests for the Stamp class.""" # pylint: disable=W0212
def setUp(self):
self.stamp1 = Stamp('abc123')
self.stamp2 = Stamp("Hello, world!", 42, False)
self.stamp3 = Stamp(True)
self.stamp4 = Stamp(False)
self.stamp5 = Stamp()
def test_repr(self):
"""Verify stamps can be represented."""
self.assertEqual("Stamp('abc123')", repr(self.stamp1))
self.assertEqual("Stamp('2645439971b8090da05c7403320afcfa')",
repr(self.stamp2))
self.assertEqual("Stamp(True)", repr(self.stamp3))
self.assertEqual("Stamp(None)", repr(self.stamp4))
self.assertEqual("Stamp(None)", repr(self.stamp5))
def test_str(self):
"""Verify stamps can be converted to strings."""
self.assertEqual('abc123', str(self.stamp1))
self.assertEqual('2645439971b8090da05c7403320afcfa', str(self.stamp2))
self.assertEqual('', str(self.stamp3))
self.assertEqual('', str(self.stamp4))
self.assertEqual('', str(self.stamp5))
def test_bool(self):
"""Verify stamps can be converted to boolean."""
self.assertTrue(self.stamp1)
self.assertTrue(self.stamp2)
self.assertTrue(self.stamp3)
self.assertFalse(self.stamp4)
self.assertFalse(self.stamp5)
def test_eq(self):
"""Verify stamps can be equated."""
self.assertEqual('abc123', self.stamp1)
self.assertEqual('2645439971b8090da05c7403320afcfa', self.stamp2)
self.assertEqual(True, self.stamp3)
self.assertEqual(None, self.stamp4)
self.assertNotEqual(self.stamp1, self.stamp2)
self.assertNotEqual(self.stamp3, self.stamp4)
self.assertEqual(self.stamp4, self.stamp5)
def test_yaml(self):
"""Verify stamps can be converted to their YAML dump format."""
self.assertEqual('abc123', self.stamp1.yaml)
self.assertEqual('2645439971b8090da05c7403320afcfa', self.stamp2.yaml)
self.assertEqual(True, self.stamp3.yaml)
self.assertEqual(None, self.stamp4.yaml)
self.assertEqual(None, self.stamp5.yaml)
class TestReference(unittest.TestCase):
"""Unit tests for the Reference class."""
def setUp(self):
self.ref1 = Reference('abc123')
self.ref2 = Reference('path/to/external.txt', 5, 10)
self.ref2 = Reference('path/to/external.dat', None, None)
self.ref3 = Reference()
| [
"\"\"\"Unit tests for the doorstop.core.types module.\"\"\"\n",
"\n",
"import unittest\n",
"\n",
"from doorstop.common import DoorstopError\n",
"from doorstop.core.types import Prefix, UID, Text, Level, Stamp, Reference\n",
"\n",
"\n",
"class TestPrefix(unittest.TestCase):\n",
"\n",
" \"\"\"Unit tests for the Prefix class.\"\"\" # pylint: disable=W0212\n",
"\n",
" def setUp(self):\n",
" self.prefix1 = Prefix('REQ')\n",
" self.prefix2 = Prefix('TST (@/tst)')\n",
"\n",
" def test_init_empty(self):\n",
" \"\"\"Verify prefixes are parsed correctly (empty).\"\"\"\n",
" self.assertEqual(Prefix(''), Prefix())\n",
" self.assertEqual(Prefix(''), Prefix(None))\n",
"\n",
" def test_init_instance(self):\n",
" \"\"\"Verify prefixes are parsed correctly (instance).\"\"\"\n",
" self.assertIs(self.prefix1, Prefix(self.prefix1))\n",
" self.assertEqual(Prefix(''), Prefix(None))\n",
" self.assertEqual(Prefix(''), Prefix(''))\n",
"\n",
" def test_init_reseved(self):\n",
" \"\"\"Verify an exception is raised for a reserved word.\"\"\"\n",
" self.assertRaises(DoorstopError, Prefix, 'ALL')\n",
"\n",
" def test_repr(self):\n",
" \"\"\"Verify prefixes can be represented.\"\"\"\n",
" self.assertEqual(\"Prefix('REQ')\", repr(self.prefix1))\n",
" self.assertEqual(\"Prefix('TST')\", repr(self.prefix2))\n",
"\n",
" def test_str(self):\n",
" \"\"\"Verify prefixes can be converted to strings.\"\"\"\n",
" self.assertEqual('REQ', str(self.prefix1))\n",
" self.assertEqual('TST', str(self.prefix2))\n",
"\n",
" def test_eq(self):\n",
" \"\"\"Verify prefixes can be equated.\"\"\"\n",
" self.assertEqual(Prefix('REQ'), self.prefix1)\n",
" self.assertNotEqual(self.prefix1, self.prefix2)\n",
" self.assertEqual(Prefix('req'), self.prefix1)\n",
" self.assertEqual('Req', self.prefix1)\n",
" self.assertNotEqual(None, self.prefix1)\n",
" self.assertNotEqual('all', self.prefix1)\n",
"\n",
" def test_sort(self):\n",
" \"\"\"Verify prefixes can be sorted.\"\"\"\n",
" prefixes = [Prefix('a'), Prefix('B'), Prefix('c')]\n",
" self.assertListEqual(prefixes, sorted(prefixes))\n",
"\n",
" def test_short(self):\n",
" \"\"\"Verify the short representation of prefixes is correct.\"\"\"\n",
" self.assertEqual('req', self.prefix1.short)\n",
" self.assertEqual('tst', self.prefix2.short)\n",
"\n",
"\n",
"class TestUID(unittest.TestCase):\n",
"\n",
" \"\"\"Unit tests for the UID class.\"\"\" # pylint: disable=W0212\n",
"\n",
" def setUp(self):\n",
" self.uid1 = UID('REQ001')\n",
" self.uid2 = UID('TST-02')\n",
" self.uid3 = UID('SYS', '-', 3, 5)\n",
" self.uid4 = UID('REQ001', stamp='abc123')\n",
"\n",
" def test_init_str(self):\n",
" \"\"\"Verify UIDs are parsed correctly (string).\"\"\"\n",
" uid = UID('REQ')\n",
" self.assertRaises(DoorstopError, getattr, uid, 'prefix')\n",
" uid = UID('REQ-?')\n",
" self.assertRaises(DoorstopError, getattr, uid, 'number')\n",
"\n",
" def test_init_dict(self):\n",
" \"\"\"Verify UIDs are parsed correctly (dictionary).\"\"\"\n",
" uid = UID({'REQ001': 'abc123'})\n",
" self.assertEqual('REQ', uid.prefix)\n",
" self.assertEqual(1, uid.number)\n",
" self.assertEqual('abc123', uid.stamp)\n",
"\n",
" def test_init_values(self):\n",
" \"\"\"Verify UIDs are parsed correctly (values).\"\"\"\n",
" self.assertRaises(TypeError, UID, 'REQ', '-')\n",
" self.assertRaises(TypeError, UID, 'REQ', '-', 42)\n",
" self.assertRaises(TypeError, UID, 'REQ', '-', 42, 3, 'extra')\n",
"\n",
" def test_init_empty(self):\n",
" \"\"\"Verify UIDs are parsed correctly (empty).\"\"\"\n",
" self.assertEqual(UID(''), UID())\n",
" self.assertEqual(UID(''), UID(None))\n",
"\n",
" def test_init_instance(self):\n",
" \"\"\"Verify UIDs are parsed correctly (instance).\"\"\"\n",
" self.assertIs(self.uid1, UID(self.uid1))\n",
" self.assertIs(self.uid4, UID(self.uid4))\n",
"\n",
" def test_repr(self):\n",
" \"\"\"Verify UIDs can be represented.\"\"\"\n",
" self.assertEqual(\"UID('REQ001')\", repr(self.uid1))\n",
" self.assertEqual(\"UID('TST-02')\", repr(self.uid2))\n",
" self.assertEqual(\"UID('SYS-00003')\", repr(self.uid3))\n",
" self.assertEqual(\"UID('REQ001', stamp='abc123')\", repr(self.uid4))\n",
"\n",
" def test_str(self):\n",
" \"\"\"Verify UIDs can be converted to strings.\"\"\"\n",
" self.assertEqual('REQ001', str(self.uid1))\n",
" self.assertEqual('TST-02', str(self.uid2))\n",
" self.assertEqual('SYS-00003', str(self.uid3))\n",
"\n",
" def test_eq(self):\n",
" \"\"\"Verify UIDs can be equated.\"\"\"\n",
" self.assertEqual(UID('REQ.001'), UID('req', '', 1, 3))\n",
" self.assertEqual(UID('REQ1'), UID('REQ', '', 1, 3))\n",
" self.assertNotEqual(UID('REQ.2'), UID('REQ', '-', 1, 3))\n",
" self.assertEqual(UID('REQ1'), UID('REQ001 (@/req1.yml)'))\n",
" self.assertEqual('req1', UID('REQ001'))\n",
" self.assertNotEqual(None, UID('REQ001'))\n",
" self.assertEqual(self.uid1, self.uid4)\n",
"\n",
" def test_sort(self):\n",
" \"\"\"Verify UIDs can be sorted.\"\"\"\n",
" uids = [UID('a'), UID('a1'), UID('a2'), UID('b')]\n",
" self.assertListEqual(uids, sorted(uids))\n",
"\n",
" def test_prefix(self):\n",
" \"\"\"Verify UIDs have prefixes.\"\"\"\n",
" self.assertEqual('REQ', self.uid1.prefix)\n",
" self.assertEqual('TST', self.uid2.prefix)\n",
" self.assertEqual('SYS', self.uid3.prefix)\n",
"\n",
" def test_number(self):\n",
" \"\"\"Verify UIDs have numbers.\"\"\"\n",
" self.assertEqual(1, self.uid1.number)\n",
" self.assertEqual(2, self.uid2.number)\n",
" self.assertEqual(3, self.uid3.number)\n",
"\n",
" def test_short(self):\n",
" \"\"\"Verify the short representation of IDs is correct.\"\"\"\n",
" self.assertEqual('req1', self.uid1.short)\n",
" self.assertEqual('tst2', self.uid2.short)\n",
" self.assertEqual('sys3', self.uid3.short)\n",
"\n",
" def test_string(self):\n",
" \"\"\"Verify UIDs can be converted to string including stamps.\"\"\"\n",
" self.assertEqual(\"REQ001\", self.uid1.string)\n",
" self.assertEqual(\"REQ001:abc123\", self.uid4.string)\n",
"\n",
" def test_stamp(self):\n",
" \"\"\"Verify stamps are stored correctly.\"\"\"\n",
" self.assertEqual('abc123', self.uid4.stamp)\n",
" self.assertEqual('abc123', UID(self.uid4).stamp)\n",
" self.assertEqual('def456', UID(self.uid4, stamp='def456').stamp)\n",
" self.assertEqual(True, UID({'REQ001': 1}).stamp)\n",
" self.assertEqual(True, UID(\"REQ001:1\").stamp)\n",
"\n",
"\n",
"class TestText(unittest.TestCase):\n",
"\n",
" \"\"\"Unit tests for the Text class.\"\"\" # pylint: disable=W0212\n",
"\n",
" def setUp(self):\n",
" self.text = Text(\"Hello, \\nworld! \")\n",
"\n",
" def test_init(self):\n",
" \"\"\"Verify Text is parsed correctly.\"\"\"\n",
" self.assertEqual(Text(\"\"), Text())\n",
" self.assertEqual(Text(\"\"), Text(None))\n",
" self.assertEqual(Text(\"\"), Text(\"\"))\n",
"\n",
" def test_repr(self):\n",
" \"\"\"Verify text can be represented.\"\"\"\n",
" self.assertEqual(\"'Hello, world!'\", repr(self.text))\n",
"\n",
" def test_str(self):\n",
" \"\"\"Verify text can be converted to strings.\"\"\"\n",
" self.assertEqual(\"Hello, world!\", str(self.text))\n",
"\n",
" def test_eq(self):\n",
" \"\"\"Verify text can be equated.\"\"\"\n",
" self.assertEqual(Text(\"Hello, world!\"), self.text)\n",
"\n",
" def test_yaml(self):\n",
" \"\"\"Verify levels can be converted to their YAML representation.\"\"\"\n",
" self.assertEqual(\"Hello, world!\\n\", self.text.yaml)\n",
"\n",
"\n",
"class TestLevel(unittest.TestCase):\n",
"\n",
" \"\"\"Unit tests for the Level class.\"\"\" # pylint: disable=W0212\n",
"\n",
" def setUp(self):\n",
" self.level_1 = Level('1')\n",
" self.level_1_2 = Level('1.2')\n",
" self.level_1_2_heading = Level('1.2.0')\n",
" self.level_1_2_3 = Level('1.2.3')\n",
"\n",
" def test_init(self):\n",
" \"\"\"Verify levels can be parsed.\"\"\"\n",
" self.assertEqual((1, 0), Level((1, 0)).value)\n",
" self.assertEqual((1,), Level((1)).value)\n",
" self.assertEqual((1, 0), Level(Level('1.0')).value)\n",
" self.assertEqual((1, 0), Level(1, heading=True).value)\n",
" self.assertEqual((1,), Level((1, 0), heading=False).value)\n",
" self.assertEqual((1,), Level())\n",
" self.assertEqual((1,), Level(None))\n",
" self.assertEqual((1,), Level(()).value)\n",
" self.assertEqual((1,), Level(0).value)\n",
" self.assertEqual((1,), Level('').value)\n",
" self.assertEqual((0,), Level((0,)).value)\n",
" self.assertEqual((0,), Level('0').value)\n",
" self.assertEqual((0,), Level('0.0').value)\n",
"\n",
" def test_repr(self):\n",
" \"\"\"Verify levels can be represented.\"\"\"\n",
" self.assertEqual(\"Level('1')\", repr(self.level_1))\n",
" self.assertEqual(\"Level('1.2')\", repr(self.level_1_2))\n",
" self.assertEqual(\"Level('1.2', heading=True)\",\n",
" repr(self.level_1_2_heading))\n",
" self.assertEqual(\"Level('1.2.3')\", repr(self.level_1_2_3))\n",
"\n",
" def test_str(self):\n",
" \"\"\"Verify levels can be converted to strings.\"\"\"\n",
" self.assertEqual('1', str(self.level_1))\n",
" self.assertEqual('1.2', str(self.level_1_2))\n",
" self.assertEqual('1.2.0', str(self.level_1_2_heading))\n",
" self.assertEqual('1.2.3', str(self.level_1_2_3))\n",
"\n",
" def test_len(self):\n",
" \"\"\"Verify a level length is equal to number of non-heading parts.\"\"\"\n",
" self.assertEqual(1, len(self.level_1))\n",
" self.assertEqual(2, len(self.level_1_2))\n",
" self.assertEqual(2, len(self.level_1_2_heading))\n",
" self.assertEqual(3, len(self.level_1_2_3))\n",
"\n",
" def test_eq(self):\n",
" \"\"\"Verify levels can be equated.\"\"\"\n",
" self.assertNotEqual(self.level_1, self.level_1_2)\n",
" self.assertEqual(self.level_1_2, Level([1, 2]))\n",
" self.assertEqual(self.level_1_2, (1, 2))\n",
" self.assertEqual(self.level_1_2, self.level_1_2_heading)\n",
"\n",
" def test_eq_other(self):\n",
" \"\"\"Verify levels can be equated with non-levels.\"\"\"\n",
" self.assertNotEqual(self.level_1, None)\n",
" self.assertEqual((1, 2, 0), self.level_1_2_heading)\n",
" self.assertEqual((1, 2), self.level_1_2_heading)\n",
"\n",
" def test_compare(self):\n",
" \"\"\"Verify levels can be compared.\"\"\"\n",
" self.assertLess(self.level_1, self.level_1_2)\n",
" self.assertLessEqual(self.level_1, self.level_1)\n",
" self.assertLessEqual(self.level_1, self.level_1_2)\n",
" self.assertLess(self.level_1_2, [1, 3])\n",
" self.assertGreater(self.level_1_2_3, self.level_1_2)\n",
" self.assertGreaterEqual(self.level_1_2_3, self.level_1_2)\n",
" self.assertGreaterEqual(self.level_1_2_3, self.level_1_2_3)\n",
"\n",
" def test_hash(self):\n",
" \"\"\"Verify level's can be hashed.\"\"\"\n",
" levels = {Level('1.2'): 1, Level('1.2.3'): 2}\n",
" self.assertIn(self.level_1_2, levels)\n",
" self.assertNotIn(self.level_1_2_heading, levels)\n",
"\n",
" def test_add(self):\n",
" \"\"\"Verify levels can be incremented.\"\"\"\n",
" level = self.level_1_2\n",
" level += 1\n",
" self.assertEqual(Level('1.3'), level)\n",
" self.assertEqual(Level('1.5'), level + 2)\n",
"\n",
" def test_add_heading(self):\n",
" \"\"\"Verify (heading) levels can be incremented.\"\"\"\n",
" level = self.level_1_2_heading\n",
" level += 2\n",
" self.assertEqual(Level('1.4.0'), level)\n",
"\n",
" def test_sub(self):\n",
" \"\"\"Verify levels can be decremented.\"\"\"\n",
" level = self.level_1_2_3\n",
" level -= 1\n",
" self.assertEqual(Level('1.2.2'), level)\n",
" self.assertEqual(Level('1.2.1'), level - 1)\n",
"\n",
" def test_sub_heading(self):\n",
" \"\"\"Verify (heading) levels can be decremented.\"\"\"\n",
" level = self.level_1_2_heading\n",
" level -= 1\n",
" self.assertEqual(Level('1.1.0'), level)\n",
"\n",
" def test_sub_zero(self):\n",
" \"\"\"Verify levels cannot be decremented to zero.\"\"\"\n",
" level = self.level_1_2\n",
" level -= 2\n",
" self.assertEqual(Level('1.1'), level)\n",
"\n",
" def test_rshift(self):\n",
" \"\"\"Verify levels can be indented.\"\"\"\n",
" level = self.level_1_2\n",
" level >>= 1\n",
" self.assertEqual(Level('1.2.1'), level)\n",
" self.assertEqual(Level('1.2.1.1'), level >> 1)\n",
"\n",
" def test_rshift_heading(self):\n",
" \"\"\"Verify (heading) levels can be indented.\"\"\"\n",
" level = self.level_1_2_heading\n",
" level >>= 2\n",
" self.assertEqual(Level('1.2.1.1.0'), level)\n",
"\n",
" def test_rshift_negative(self):\n",
" \"\"\"Verify levels can be indented negatively.\"\"\"\n",
" level = self.level_1_2_3\n",
" level >>= -1\n",
" self.assertEqual(Level('1.2'), level)\n",
" self.assertEqual(Level('1'), level >> -1)\n",
"\n",
" def test_lshift(self):\n",
" \"\"\"Verify levels can be dedented.\"\"\"\n",
" level = self.level_1_2_3\n",
" level <<= 1\n",
" self.assertEqual(Level('1.2'), level)\n",
" self.assertEqual(Level('1'), level << 1)\n",
"\n",
" def test_lshift_heading(self):\n",
" \"\"\"Verify (heading) levels can be dedented.\"\"\"\n",
" level = self.level_1_2_heading\n",
" level <<= 1\n",
" self.assertEqual(Level('1.0'), level)\n",
"\n",
" def test_lshift_negative(self):\n",
" \"\"\"Verify levels can be dedented negatively.\"\"\"\n",
" level = self.level_1_2_3\n",
" level <<= -1\n",
" self.assertEqual(Level('1.2.3.1'), level)\n",
" self.assertEqual(Level('1.2.3.1.1'), level << -1)\n",
"\n",
" def test_lshift_empty(self):\n",
" \"\"\"Verify levels can be dedented.\"\"\"\n",
" level = self.level_1_2_3\n",
" level <<= 4\n",
" self.assertEqual(Level('1'), level)\n",
"\n",
" def test_lshift_zero(self):\n",
" \"\"\"Verify detenting levels by zero has no effect..\"\"\"\n",
" level = self.level_1_2_3\n",
" level <<= 0\n",
" self.assertEqual(Level('1.2.3'), level)\n",
"\n",
" def test_value(self):\n",
" \"\"\"Verify levels can be converted to their values.\"\"\"\n",
" self.assertEqual((1,), self.level_1.value)\n",
" self.assertEqual((1, 2), self.level_1_2.value)\n",
" self.assertEqual((1, 2, 0), self.level_1_2_heading.value)\n",
" self.assertEqual((1, 2, 3), self.level_1_2_3.value)\n",
"\n",
" def test_yaml(self):\n",
" \"\"\"Verify levels can be converted to their YAML representation.\"\"\"\n",
" self.assertEqual(1, self.level_1.yaml)\n",
" self.assertEqual(1.2, self.level_1_2.yaml)\n",
" self.assertEqual('1.2.0', self.level_1_2_heading.yaml)\n",
" self.assertEqual('1.2.3', self.level_1_2_3.yaml)\n",
"\n",
" def test_copy(self):\n",
" \"\"\"Verify levels can be copied.\"\"\"\n",
" level = self.level_1_2.copy()\n",
" self.assertEqual(level, self.level_1_2)\n",
" level += 1\n",
" self.assertNotEqual(level, self.level_1_2)\n",
"\n",
"\n",
"class TestStamp(unittest.TestCase):\n",
"\n",
" \"\"\"Unit tests for the Stamp class.\"\"\" # pylint: disable=W0212\n",
"\n",
" def setUp(self):\n",
" self.stamp1 = Stamp('abc123')\n",
" self.stamp2 = Stamp(\"Hello, world!\", 42, False)\n",
" self.stamp3 = Stamp(True)\n",
" self.stamp4 = Stamp(False)\n",
" self.stamp5 = Stamp()\n",
"\n",
" def test_repr(self):\n",
" \"\"\"Verify stamps can be represented.\"\"\"\n",
" self.assertEqual(\"Stamp('abc123')\", repr(self.stamp1))\n",
" self.assertEqual(\"Stamp('2645439971b8090da05c7403320afcfa')\",\n",
" repr(self.stamp2))\n",
" self.assertEqual(\"Stamp(True)\", repr(self.stamp3))\n",
" self.assertEqual(\"Stamp(None)\", repr(self.stamp4))\n",
" self.assertEqual(\"Stamp(None)\", repr(self.stamp5))\n",
"\n",
" def test_str(self):\n",
" \"\"\"Verify stamps can be converted to strings.\"\"\"\n",
" self.assertEqual('abc123', str(self.stamp1))\n",
" self.assertEqual('2645439971b8090da05c7403320afcfa', str(self.stamp2))\n",
" self.assertEqual('', str(self.stamp3))\n",
" self.assertEqual('', str(self.stamp4))\n",
" self.assertEqual('', str(self.stamp5))\n",
"\n",
" def test_bool(self):\n",
" \"\"\"Verify stamps can be converted to boolean.\"\"\"\n",
" self.assertTrue(self.stamp1)\n",
" self.assertTrue(self.stamp2)\n",
" self.assertTrue(self.stamp3)\n",
" self.assertFalse(self.stamp4)\n",
" self.assertFalse(self.stamp5)\n",
"\n",
" def test_eq(self):\n",
" \"\"\"Verify stamps can be equated.\"\"\"\n",
" self.assertEqual('abc123', self.stamp1)\n",
" self.assertEqual('2645439971b8090da05c7403320afcfa', self.stamp2)\n",
" self.assertEqual(True, self.stamp3)\n",
" self.assertEqual(None, self.stamp4)\n",
" self.assertNotEqual(self.stamp1, self.stamp2)\n",
" self.assertNotEqual(self.stamp3, self.stamp4)\n",
" self.assertEqual(self.stamp4, self.stamp5)\n",
"\n",
" def test_yaml(self):\n",
" \"\"\"Verify stamps can be converted to their YAML dump format.\"\"\"\n",
" self.assertEqual('abc123', self.stamp1.yaml)\n",
" self.assertEqual('2645439971b8090da05c7403320afcfa', self.stamp2.yaml)\n",
" self.assertEqual(True, self.stamp3.yaml)\n",
" self.assertEqual(None, self.stamp4.yaml)\n",
" self.assertEqual(None, self.stamp5.yaml)\n",
"\n",
"\n",
"class TestReference(unittest.TestCase):\n",
"\n",
" \"\"\"Unit tests for the Reference class.\"\"\"\n",
"\n",
" def setUp(self):\n",
" self.ref1 = Reference('abc123')\n",
" self.ref2 = Reference('path/to/external.txt', 5, 10)\n",
" self.ref2 = Reference('path/to/external.dat', None, None)\n",
" self.ref3 = Reference()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 438 | 0 |
#!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""`StackContext` allows applications to maintain threadlocal-like state
that follows execution as it moves to other execution contexts.
The motivating examples are to eliminate the need for explicit
``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
allow some additional context to be kept for logging.
This is slightly magic, but it's an extension of the idea that an
exception handler is a kind of stack-local state and when that stack
is suspended and resumed in a new context that state needs to be
preserved. `StackContext` shifts the burden of restoring that state
from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
in ``async_callback``) to the mechanisms that transfer control from
one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
thread pools, etc).
Example usage::
@contextlib.contextmanager
def die_on_error():
try:
yield
except Exception:
logging.error("exception in asynchronous operation",exc_info=True)
sys.exit(1)
with StackContext(die_on_error):
# Any exception thrown here *or in callback and its descendants*
# will cause the process to exit instead of spinning endlessly
# in the ioloop.
http_client.fetch(url, callback)
ioloop.start()
Most applications shouldn't have to work with `StackContext` directly.
Here are a few rules of thumb for when it's necessary:
* If you're writing an asynchronous library that doesn't rely on a
stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
(for example, if you're writing a thread pool), use
`.stack_context.wrap()` before any asynchronous operations to capture the
stack context from where the operation was started.
* If you're writing an asynchronous library that has some shared
resources (such as a connection pool), create those shared resources
within a ``with stack_context.NullContext():`` block. This will prevent
``StackContexts`` from leaking from one request to another.
* If you want to write something like an exception handler that will
persist across asynchronous calls, create a new `StackContext` (or
`ExceptionStackContext`), and make your asynchronous calls in a ``with``
block that references your `StackContext`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import sys
import threading
from tornado.util import raise_exc_info
class StackContextInconsistentError(Exception):
pass
class _State(threading.local):
def __init__(self):
self.contexts = (tuple(), None)
_state = _State()
class StackContext(object):
"""Establishes the given context as a StackContext that will be transferred.
Note that the parameter is a callable that returns a context
manager, not the context itself. That is, where for a
non-transferable context manager you would say::
with my_context():
StackContext takes the function itself rather than its result::
with StackContext(my_context):
The result of ``with StackContext() as cb:`` is a deactivation
callback. Run this callback when the StackContext is no longer
needed to ensure that it is not propagated any further (note that
deactivating a context does not affect any instances of that
context that are currently pending). This is an advanced feature
and not necessary in most applications.
"""
def __init__(self, context_factory):
self.context_factory = context_factory
self.contexts = []
self.active = True
def _deactivate(self):
self.active = False
# StackContext protocol
def enter(self):
context = self.context_factory()
self.contexts.append(context)
context.__enter__()
def exit(self, type, value, traceback):
context = self.contexts.pop()
context.__exit__(type, value, traceback)
# Note that some of this code is duplicated in ExceptionStackContext
# below. ExceptionStackContext is more common and doesn't need
# the full generality of this class.
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0] + (self,), self)
_state.contexts = self.new_contexts
try:
self.enter()
except:
_state.contexts = self.old_contexts
raise
return self._deactivate
def __exit__(self, type, value, traceback):
try:
self.exit(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
# Generator coroutines and with-statements with non-local
# effects interact badly. Check here for signs of
# the stack getting out of sync.
# Note that this check comes after restoring _state.context
# so that if it fails things are left in a (relatively)
# consistent state.
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
# Break up a reference to itself to allow for faster GC on CPython.
self.new_contexts = None
class ExceptionStackContext(object):
"""Specialization of StackContext for exception handling.
The supplied ``exception_handler`` function will be called in the
event of an uncaught exception in this context. The semantics are
similar to a try/finally clause, and intended use cases are to log
an error, close a socket, or similar cleanup actions. The
``exc_info`` triple ``(type, value, traceback)`` will be passed to the
exception_handler function.
If the exception handler returns true, the exception will be
consumed and will not be propagated to other exception handlers.
"""
def __init__(self, exception_handler):
self.exception_handler = exception_handler
self.active = True
def _deactivate(self):
self.active = False
def exit(self, type, value, traceback):
if type is not None:
return self.exception_handler(type, value, traceback)
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0], self)
_state.contexts = self.new_contexts
return self._deactivate
def __exit__(self, type, value, traceback):
try:
if type is not None:
return self.exception_handler(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
# Break up a reference to itself to allow for faster GC on CPython.
self.new_contexts = None
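# --- Illustrative sketch (not part of the original tornado module): a
# handler usable with ExceptionStackContext, following the class docstring
# above. Returning True consumes the exception so it is not propagated to
# other handlers. The standard-library ``logging`` import is an assumption.
def _example_log_and_swallow(typ, value, tb):
    import logging
    logging.error("uncaught exception in callback", exc_info=(typ, value, tb))
    return True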
class NullContext(object):
"""Resets the `StackContext`.
Useful when creating a shared resource on demand (e.g. an
    `.AsyncHTTPClient`) where the stack that caused the creation is
not relevant to future operations.
"""
def __enter__(self):
self.old_contexts = _state.contexts
_state.contexts = (tuple(), None)
def __exit__(self, type, value, traceback):
_state.contexts = self.old_contexts
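# --- Illustrative sketch (not part of the original tornado module): building
# a shared resource inside NullContext, per the "connection pool" rule of
# thumb in the module docstring. ``factory`` is a hypothetical callable that
# constructs the shared object.
def _example_create_shared_resource(factory):
    # Objects created here do not capture the caller's StackContexts, so one
    # request's contexts cannot leak into callbacks belonging to another.
    with NullContext():
        return factory()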
def _remove_deactivated(contexts):
"""Remove deactivated handlers from the chain"""
# Clean ctx handlers
stack_contexts = tuple([h for h in contexts[0] if h.active])
# Find new head
head = contexts[1]
while head is not None and not head.active:
head = head.old_contexts[1]
# Process chain
ctx = head
while ctx is not None:
parent = ctx.old_contexts[1]
while parent is not None:
if parent.active:
break
ctx.old_contexts = parent.old_contexts
parent = parent.old_contexts[1]
ctx = parent
return (stack_contexts, head)
def wrap(fn):
"""Returns a callable object that will restore the current `StackContext`
when executed.
Use this whenever saving a callback to be executed later in a
different execution context (either in a different thread or
asynchronously in the same thread).
"""
# Check if function is already wrapped
if fn is None or hasattr(fn, '_wrapped'):
return fn
# Capture current stack head
# TODO: Any other better way to store contexts and update them in wrapped function?
cap_contexts = [_state.contexts]
if not cap_contexts[0][0] and not cap_contexts[0][1]:
# Fast path when there are no active contexts.
def null_wrapper(*args, **kwargs):
try:
current_state = _state.contexts
_state.contexts = cap_contexts[0]
return fn(*args, **kwargs)
finally:
_state.contexts = current_state
null_wrapper._wrapped = True
return null_wrapper
def wrapped(*args, **kwargs):
ret = None
try:
# Capture old state
current_state = _state.contexts
# Remove deactivated items
cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
# Force new state
_state.contexts = contexts
# Current exception
exc = (None, None, None)
top = None
# Apply stack contexts
last_ctx = 0
stack = contexts[0]
# Apply state
for n in stack:
try:
n.enter()
last_ctx += 1
except:
# Exception happened. Record exception info and store top-most handler
exc = sys.exc_info()
top = n.old_contexts[1]
# Execute callback if no exception happened while restoring state
if top is None:
try:
ret = fn(*args, **kwargs)
except:
exc = sys.exc_info()
top = contexts[1]
# If there was exception, try to handle it by going through the exception chain
if top is not None:
exc = _handle_exception(top, exc)
else:
# Otherwise take shorter path and run stack contexts in reverse order
while last_ctx > 0:
last_ctx -= 1
c = stack[last_ctx]
try:
c.exit(*exc)
except:
exc = sys.exc_info()
top = c.old_contexts[1]
break
else:
top = None
            # If an exception happened while unrolling, take the longer exception handler path
if top is not None:
exc = _handle_exception(top, exc)
# If exception was not handled, raise it
if exc != (None, None, None):
raise_exc_info(exc)
finally:
_state.contexts = current_state
return ret
wrapped._wrapped = True
return wrapped
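# --- Illustrative sketch (not part of the original tornado module): how a
# component that is not stack_context-aware (e.g. a thread pool) might use
# wrap() to capture the caller's contexts before scheduling work elsewhere.
# The ``executor.submit`` call assumes a concurrent.futures-style executor.
def _example_submit_with_context(executor, callback, *args, **kwargs):
    # Capture the contexts that are active right now; wrap() restores them
    # around the callback when the executor thread eventually runs it.
    return executor.submit(wrap(callback), *args, **kwargs)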
def _handle_exception(tail, exc):
while tail is not None:
try:
if tail.exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
tail = tail.old_contexts[1]
return exc
def run_with_stack_context(context, func):
"""Run a coroutine ``func`` in the given `StackContext`.
It is not safe to have a ``yield`` statement within a ``with StackContext``
block, so it is difficult to use stack context with `.gen.coroutine`.
This helper function runs the function in the correct context while
keeping the ``yield`` and ``with`` statements syntactically separate.
Example::
@gen.coroutine
def incorrect():
with StackContext(ctx):
# ERROR: this will raise StackContextInconsistentError
yield other_coroutine()
@gen.coroutine
def correct():
yield run_with_stack_context(StackContext(ctx), other_coroutine)
.. versionadded:: 3.1
"""
with context:
return func()
| [
"#!/usr/bin/env python\n",
"#\n",
"# Copyright 2010 Facebook\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"\"\"\"`StackContext` allows applications to maintain threadlocal-like state\n",
"that follows execution as it moves to other execution contexts.\n",
"\n",
"The motivating examples are to eliminate the need for explicit\n",
"``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to\n",
"allow some additional context to be kept for logging.\n",
"\n",
"This is slightly magic, but it's an extension of the idea that an\n",
"exception handler is a kind of stack-local state and when that stack\n",
"is suspended and resumed in a new context that state needs to be\n",
"preserved. `StackContext` shifts the burden of restoring that state\n",
"from each call site (e.g. wrapping each `.AsyncHTTPClient` callback\n",
"in ``async_callback``) to the mechanisms that transfer control from\n",
"one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,\n",
"thread pools, etc).\n",
"\n",
"Example usage::\n",
"\n",
" @contextlib.contextmanager\n",
" def die_on_error():\n",
" try:\n",
" yield\n",
" except Exception:\n",
" logging.error(\"exception in asynchronous operation\",exc_info=True)\n",
" sys.exit(1)\n",
"\n",
" with StackContext(die_on_error):\n",
" # Any exception thrown here *or in callback and its descendants*\n",
" # will cause the process to exit instead of spinning endlessly\n",
" # in the ioloop.\n",
" http_client.fetch(url, callback)\n",
" ioloop.start()\n",
"\n",
"Most applications shouldn't have to work with `StackContext` directly.\n",
"Here are a few rules of thumb for when it's necessary:\n",
"\n",
"* If you're writing an asynchronous library that doesn't rely on a\n",
" stack_context-aware library like `tornado.ioloop` or `tornado.iostream`\n",
" (for example, if you're writing a thread pool), use\n",
" `.stack_context.wrap()` before any asynchronous operations to capture the\n",
" stack context from where the operation was started.\n",
"\n",
"* If you're writing an asynchronous library that has some shared\n",
" resources (such as a connection pool), create those shared resources\n",
" within a ``with stack_context.NullContext():`` block. This will prevent\n",
" ``StackContexts`` from leaking from one request to another.\n",
"\n",
"* If you want to write something like an exception handler that will\n",
" persist across asynchronous calls, create a new `StackContext` (or\n",
" `ExceptionStackContext`), and make your asynchronous calls in a ``with``\n",
" block that references your `StackContext`.\n",
"\"\"\"\n",
"\n",
"from __future__ import absolute_import, division, print_function, with_statement\n",
"\n",
"import sys\n",
"import threading\n",
"\n",
"from tornado.util import raise_exc_info\n",
"\n",
"\n",
"class StackContextInconsistentError(Exception):\n",
" pass\n",
"\n",
"\n",
"class _State(threading.local):\n",
" def __init__(self):\n",
" self.contexts = (tuple(), None)\n",
"_state = _State()\n",
"\n",
"\n",
"class StackContext(object):\n",
" \"\"\"Establishes the given context as a StackContext that will be transferred.\n",
"\n",
" Note that the parameter is a callable that returns a context\n",
" manager, not the context itself. That is, where for a\n",
" non-transferable context manager you would say::\n",
"\n",
" with my_context():\n",
"\n",
" StackContext takes the function itself rather than its result::\n",
"\n",
" with StackContext(my_context):\n",
"\n",
" The result of ``with StackContext() as cb:`` is a deactivation\n",
" callback. Run this callback when the StackContext is no longer\n",
" needed to ensure that it is not propagated any further (note that\n",
" deactivating a context does not affect any instances of that\n",
" context that are currently pending). This is an advanced feature\n",
" and not necessary in most applications.\n",
" \"\"\"\n",
" def __init__(self, context_factory):\n",
" self.context_factory = context_factory\n",
" self.contexts = []\n",
" self.active = True\n",
"\n",
" def _deactivate(self):\n",
" self.active = False\n",
"\n",
" # StackContext protocol\n",
" def enter(self):\n",
" context = self.context_factory()\n",
" self.contexts.append(context)\n",
" context.__enter__()\n",
"\n",
" def exit(self, type, value, traceback):\n",
" context = self.contexts.pop()\n",
" context.__exit__(type, value, traceback)\n",
"\n",
" # Note that some of this code is duplicated in ExceptionStackContext\n",
" # below. ExceptionStackContext is more common and doesn't need\n",
" # the full generality of this class.\n",
" def __enter__(self):\n",
" self.old_contexts = _state.contexts\n",
" self.new_contexts = (self.old_contexts[0] + (self,), self)\n",
" _state.contexts = self.new_contexts\n",
"\n",
" try:\n",
" self.enter()\n",
" except:\n",
" _state.contexts = self.old_contexts\n",
" raise\n",
"\n",
" return self._deactivate\n",
"\n",
" def __exit__(self, type, value, traceback):\n",
" try:\n",
" self.exit(type, value, traceback)\n",
" finally:\n",
" final_contexts = _state.contexts\n",
" _state.contexts = self.old_contexts\n",
"\n",
" # Generator coroutines and with-statements with non-local\n",
" # effects interact badly. Check here for signs of\n",
" # the stack getting out of sync.\n",
" # Note that this check comes after restoring _state.context\n",
" # so that if it fails things are left in a (relatively)\n",
" # consistent state.\n",
" if final_contexts is not self.new_contexts:\n",
" raise StackContextInconsistentError(\n",
" 'stack_context inconsistency (may be caused by yield '\n",
" 'within a \"with StackContext\" block)')\n",
"\n",
" # Break up a reference to itself to allow for faster GC on CPython.\n",
" self.new_contexts = None\n",
"\n",
"\n",
"class ExceptionStackContext(object):\n",
" \"\"\"Specialization of StackContext for exception handling.\n",
"\n",
" The supplied ``exception_handler`` function will be called in the\n",
" event of an uncaught exception in this context. The semantics are\n",
" similar to a try/finally clause, and intended use cases are to log\n",
" an error, close a socket, or similar cleanup actions. The\n",
" ``exc_info`` triple ``(type, value, traceback)`` will be passed to the\n",
" exception_handler function.\n",
"\n",
" If the exception handler returns true, the exception will be\n",
" consumed and will not be propagated to other exception handlers.\n",
" \"\"\"\n",
" def __init__(self, exception_handler):\n",
" self.exception_handler = exception_handler\n",
" self.active = True\n",
"\n",
" def _deactivate(self):\n",
" self.active = False\n",
"\n",
" def exit(self, type, value, traceback):\n",
" if type is not None:\n",
" return self.exception_handler(type, value, traceback)\n",
"\n",
" def __enter__(self):\n",
" self.old_contexts = _state.contexts\n",
" self.new_contexts = (self.old_contexts[0], self)\n",
" _state.contexts = self.new_contexts\n",
"\n",
" return self._deactivate\n",
"\n",
" def __exit__(self, type, value, traceback):\n",
" try:\n",
" if type is not None:\n",
" return self.exception_handler(type, value, traceback)\n",
" finally:\n",
" final_contexts = _state.contexts\n",
" _state.contexts = self.old_contexts\n",
"\n",
" if final_contexts is not self.new_contexts:\n",
" raise StackContextInconsistentError(\n",
" 'stack_context inconsistency (may be caused by yield '\n",
" 'within a \"with StackContext\" block)')\n",
"\n",
" # Break up a reference to itself to allow for faster GC on CPython.\n",
" self.new_contexts = None\n",
"\n",
"\n",
"class NullContext(object):\n",
" \"\"\"Resets the `StackContext`.\n",
"\n",
" Useful when creating a shared resource on demand (e.g. an\n",
" `.AsyncHTTPClient`) where the stack that caused the creating is\n",
" not relevant to future operations.\n",
" \"\"\"\n",
" def __enter__(self):\n",
" self.old_contexts = _state.contexts\n",
" _state.contexts = (tuple(), None)\n",
"\n",
" def __exit__(self, type, value, traceback):\n",
" _state.contexts = self.old_contexts\n",
"\n",
"\n",
"def _remove_deactivated(contexts):\n",
" \"\"\"Remove deactivated handlers from the chain\"\"\"\n",
" # Clean ctx handlers\n",
" stack_contexts = tuple([h for h in contexts[0] if h.active])\n",
"\n",
" # Find new head\n",
" head = contexts[1]\n",
" while head is not None and not head.active:\n",
" head = head.old_contexts[1]\n",
"\n",
" # Process chain\n",
" ctx = head\n",
" while ctx is not None:\n",
" parent = ctx.old_contexts[1]\n",
"\n",
" while parent is not None:\n",
" if parent.active:\n",
" break\n",
" ctx.old_contexts = parent.old_contexts\n",
" parent = parent.old_contexts[1]\n",
"\n",
" ctx = parent\n",
"\n",
" return (stack_contexts, head)\n",
"\n",
"\n",
"def wrap(fn):\n",
" \"\"\"Returns a callable object that will restore the current `StackContext`\n",
" when executed.\n",
"\n",
" Use this whenever saving a callback to be executed later in a\n",
" different execution context (either in a different thread or\n",
" asynchronously in the same thread).\n",
" \"\"\"\n",
" # Check if function is already wrapped\n",
" if fn is None or hasattr(fn, '_wrapped'):\n",
" return fn\n",
"\n",
" # Capture current stack head\n",
" # TODO: Any other better way to store contexts and update them in wrapped function?\n",
" cap_contexts = [_state.contexts]\n",
"\n",
" if not cap_contexts[0][0] and not cap_contexts[0][1]:\n",
" # Fast path when there are no active contexts.\n",
" def null_wrapper(*args, **kwargs):\n",
" try:\n",
" current_state = _state.contexts\n",
" _state.contexts = cap_contexts[0]\n",
" return fn(*args, **kwargs)\n",
" finally:\n",
" _state.contexts = current_state\n",
" null_wrapper._wrapped = True\n",
" return null_wrapper\n",
"\n",
" def wrapped(*args, **kwargs):\n",
" ret = None\n",
" try:\n",
" # Capture old state\n",
" current_state = _state.contexts\n",
"\n",
" # Remove deactivated items\n",
" cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])\n",
"\n",
" # Force new state\n",
" _state.contexts = contexts\n",
"\n",
" # Current exception\n",
" exc = (None, None, None)\n",
" top = None\n",
"\n",
" # Apply stack contexts\n",
" last_ctx = 0\n",
" stack = contexts[0]\n",
"\n",
" # Apply state\n",
" for n in stack:\n",
" try:\n",
" n.enter()\n",
" last_ctx += 1\n",
" except:\n",
" # Exception happened. Record exception info and store top-most handler\n",
" exc = sys.exc_info()\n",
" top = n.old_contexts[1]\n",
"\n",
" # Execute callback if no exception happened while restoring state\n",
" if top is None:\n",
" try:\n",
" ret = fn(*args, **kwargs)\n",
" except:\n",
" exc = sys.exc_info()\n",
" top = contexts[1]\n",
"\n",
" # If there was exception, try to handle it by going through the exception chain\n",
" if top is not None:\n",
" exc = _handle_exception(top, exc)\n",
" else:\n",
" # Otherwise take shorter path and run stack contexts in reverse order\n",
" while last_ctx > 0:\n",
" last_ctx -= 1\n",
" c = stack[last_ctx]\n",
"\n",
" try:\n",
" c.exit(*exc)\n",
" except:\n",
" exc = sys.exc_info()\n",
" top = c.old_contexts[1]\n",
" break\n",
" else:\n",
" top = None\n",
"\n",
" # If if exception happened while unrolling, take longer exception handler path\n",
" if top is not None:\n",
" exc = _handle_exception(top, exc)\n",
"\n",
" # If exception was not handled, raise it\n",
" if exc != (None, None, None):\n",
" raise_exc_info(exc)\n",
" finally:\n",
" _state.contexts = current_state\n",
" return ret\n",
"\n",
" wrapped._wrapped = True\n",
" return wrapped\n",
"\n",
"\n",
"def _handle_exception(tail, exc):\n",
" while tail is not None:\n",
" try:\n",
" if tail.exit(*exc):\n",
" exc = (None, None, None)\n",
" except:\n",
" exc = sys.exc_info()\n",
"\n",
" tail = tail.old_contexts[1]\n",
"\n",
" return exc\n",
"\n",
"\n",
"def run_with_stack_context(context, func):\n",
" \"\"\"Run a coroutine ``func`` in the given `StackContext`.\n",
"\n",
" It is not safe to have a ``yield`` statement within a ``with StackContext``\n",
" block, so it is difficult to use stack context with `.gen.coroutine`.\n",
" This helper function runs the function in the correct context while\n",
" keeping the ``yield`` and ``with`` statements syntactically separate.\n",
"\n",
" Example::\n",
"\n",
" @gen.coroutine\n",
" def incorrect():\n",
" with StackContext(ctx):\n",
" # ERROR: this will raise StackContextInconsistentError\n",
" yield other_coroutine()\n",
"\n",
" @gen.coroutine\n",
" def correct():\n",
" yield run_with_stack_context(StackContext(ctx), other_coroutine)\n",
"\n",
" .. versionadded:: 3.1\n",
" \"\"\"\n",
" with context:\n",
" return func()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 388 | 0.000979 |
import datetime
import pytz
import recurrence
from django.test import TestCase
from django.test import override_settings
from radioco.apps.programmes.models import Programme
from radioco.apps.radioco.test_utils import TestDataMixin, SPAIN_TZ
from radioco.apps.radioco.tz_utils import transform_dt_to_default_tz
from radioco.apps.schedules.models import Schedule
BEFORE_CEST_TRANSITION = SPAIN_TZ.localize(datetime.datetime(2017, 3, 26, 1, 59, 59)) # CET+1:00:00
AFTER_CEST_TRANSITION = SPAIN_TZ.localize(datetime.datetime(2017, 3, 26, 3, 0, 0)) # CEST+2:00:00
BEFORE_CET_TRANSITION = SPAIN_TZ.localize(datetime.datetime(2017, 10, 29, 2, 59, 59), is_dst=True) # CEST+2:00:00
AFTER_CET_TRANSITION = SPAIN_TZ.localize(datetime.datetime(2017, 10, 29, 2, 0, 0), is_dst=False) # CET+1:00:00
def test_CET_transitions(self):
assert BEFORE_CEST_TRANSITION == pytz.utc.localize(datetime.datetime(2017, 3, 26, 0, 59, 59))
assert AFTER_CEST_TRANSITION == pytz.utc.localize(datetime.datetime(2017, 3, 26, 1, 0, 0))
assert BEFORE_CET_TRANSITION == pytz.utc.localize(datetime.datetime(2017, 10, 29, 0, 59, 59))
assert AFTER_CET_TRANSITION == pytz.utc.localize(datetime.datetime(2017, 10, 29, 1, 0, 0))
@override_settings(TIME_ZONE='Europe/Madrid')
# @mock.patch('django.utils.timezone.get_default_timezone', spain_tz)
# @mock.patch('django.utils.timezone.get_current_timezone', spain_tz)
class ScheduleModelTests(TestDataMixin, TestCase):
def setUp(self):
synopsis = '''
This programme has complex schedules to test timezone changes.
Only active between March and October 2017
'''
programme, created = Programme.objects.get_or_create(
name='Timezone', defaults={
'synopsis': synopsis,
'language': 'en',
'photo': 'defaults/example/radio_1.jpg',
'current_season': 1,
'category': 'News & Politics',
'_runtime': 60,
'start_date': datetime.date(2017, 3, 1),
'end_date': datetime.date(2017, 10, 31),
}
)
self.cest_schedule, created = Schedule.objects.get_or_create(
programme=programme,
type='L',
calendar=self.calendar,
recurrences=recurrence.Recurrence(
rrules=[recurrence.Rule(
recurrence.DAILY, until=SPAIN_TZ.localize(datetime.datetime(2017, 3, 27)))]
),
start_dt=SPAIN_TZ.localize(datetime.datetime(2017, 3, 25, 10, 00, 00)))
self.cet_schedule, created = Schedule.objects.get_or_create(
programme=programme,
type='L',
calendar=self.calendar,
recurrences=recurrence.Recurrence(
rrules=[recurrence.Rule(recurrence.DAILY)],
),
start_dt=SPAIN_TZ.localize(datetime.datetime(2017, 10, 28, 14, 00, 00)))
def test_transform_dt_to_default_tz(self):
utc_dt = pytz.utc.localize(datetime.datetime(2017, 1, 1, 0, 00, 00))
spain_dt = transform_dt_to_default_tz(utc_dt)
self.assertEqual(spain_dt.tzinfo.zone, 'Europe/Madrid')
self.assertEqual(spain_dt, SPAIN_TZ.localize(datetime.datetime(2017, 1, 1, 1, 0, 0)))
def test_cleaned_internal_recurrence_dates(self):
self.assertEqual(
self.cest_schedule.recurrences.rrules[0].until,
SPAIN_TZ.localize(datetime.datetime(2017, 3, 27, 23, 59, 59)))
def test_CEST_transition(self):
after = SPAIN_TZ.localize(datetime.datetime(2017, 2, 1, 0, 0, 00))
before = SPAIN_TZ.localize(datetime.datetime(2017, 11, 30, 0, 0, 00))
dates_between = self.cest_schedule.dates_between(after, before)
expected_dates = (
SPAIN_TZ.localize(datetime.datetime(2017, 3, 25, 10, 0, 0)),
SPAIN_TZ.localize(datetime.datetime(2017, 3, 26, 10, 0, 0)),
SPAIN_TZ.localize(datetime.datetime(2017, 3, 27, 10, 0, 0)),
)
self.assertCountEqual(expected_dates, dates_between)
def test_CET_transition(self):
after = SPAIN_TZ.localize(datetime.datetime(2017, 10, 28, 14, 0, 0))
before = SPAIN_TZ.localize(datetime.datetime(2017, 10, 30, 14, 0, 0))
dates_between = self.cet_schedule.dates_between(after, before)
expected_dates = (
SPAIN_TZ.localize(datetime.datetime(2017, 10, 28, 14, 0, 0)),
SPAIN_TZ.localize(datetime.datetime(2017, 10, 29, 14, 0, 0)),
SPAIN_TZ.localize(datetime.datetime(2017, 10, 30, 14, 0, 0)),
)
self.assertCountEqual(expected_dates, dates_between) | [
"import datetime\n",
"\n",
"import pytz\n",
"import recurrence\n",
"from django.test import TestCase\n",
"from django.test import override_settings\n",
"\n",
"from radioco.apps.programmes.models import Programme\n",
"from radioco.apps.radioco.test_utils import TestDataMixin, SPAIN_TZ\n",
"from radioco.apps.radioco.tz_utils import transform_dt_to_default_tz\n",
"from radioco.apps.schedules.models import Schedule\n",
"\n",
"\n",
"BEFORE_CEST_TRANSITION = SPAIN_TZ.localize(datetime.datetime(2017, 3, 26, 1, 59, 59)) # CET+1:00:00\n",
"AFTER_CEST_TRANSITION = SPAIN_TZ.localize(datetime.datetime(2017, 3, 26, 3, 0, 0)) # CEST+2:00:00\n",
"\n",
"BEFORE_CET_TRANSITION = SPAIN_TZ.localize(datetime.datetime(2017, 10, 29, 2, 59, 59), is_dst=True) # CEST+2:00:00\n",
"AFTER_CET_TRANSITION = SPAIN_TZ.localize(datetime.datetime(2017, 10, 29, 2, 0, 0), is_dst=False) # CET+1:00:00\n",
"\n",
"\n",
"def test_CET_transitions(self):\n",
" assert BEFORE_CEST_TRANSITION == pytz.utc.localize(datetime.datetime(2017, 3, 26, 0, 59, 59))\n",
" assert AFTER_CEST_TRANSITION == pytz.utc.localize(datetime.datetime(2017, 3, 26, 1, 0, 0))\n",
"\n",
" assert BEFORE_CET_TRANSITION == pytz.utc.localize(datetime.datetime(2017, 10, 29, 0, 59, 59))\n",
" assert AFTER_CET_TRANSITION == pytz.utc.localize(datetime.datetime(2017, 10, 29, 1, 0, 0))\n",
"\n",
"\n",
"@override_settings(TIME_ZONE='Europe/Madrid')\n",
"# @mock.patch('django.utils.timezone.get_default_timezone', spain_tz)\n",
"# @mock.patch('django.utils.timezone.get_current_timezone', spain_tz)\n",
"class ScheduleModelTests(TestDataMixin, TestCase):\n",
"\n",
" def setUp(self):\n",
" synopsis = '''\n",
" This programme has complex schedules to test timezone changes.\n",
" Only active between March and October 2017\n",
" '''\n",
" programme, created = Programme.objects.get_or_create(\n",
" name='Timezone', defaults={\n",
" 'synopsis': synopsis,\n",
" 'language': 'en',\n",
" 'photo': 'defaults/example/radio_1.jpg',\n",
" 'current_season': 1,\n",
" 'category': 'News & Politics',\n",
" '_runtime': 60,\n",
" 'start_date': datetime.date(2017, 3, 1),\n",
" 'end_date': datetime.date(2017, 10, 31),\n",
" }\n",
" )\n",
" self.cest_schedule, created = Schedule.objects.get_or_create(\n",
" programme=programme,\n",
" type='L',\n",
" calendar=self.calendar,\n",
" recurrences=recurrence.Recurrence(\n",
" rrules=[recurrence.Rule(\n",
" recurrence.DAILY, until=SPAIN_TZ.localize(datetime.datetime(2017, 3, 27)))]\n",
" ),\n",
" start_dt=SPAIN_TZ.localize(datetime.datetime(2017, 3, 25, 10, 00, 00)))\n",
"\n",
" self.cet_schedule, created = Schedule.objects.get_or_create(\n",
" programme=programme,\n",
" type='L',\n",
" calendar=self.calendar,\n",
" recurrences=recurrence.Recurrence(\n",
" rrules=[recurrence.Rule(recurrence.DAILY)],\n",
" ),\n",
" start_dt=SPAIN_TZ.localize(datetime.datetime(2017, 10, 28, 14, 00, 00)))\n",
"\n",
" def test_transform_dt_to_default_tz(self):\n",
" utc_dt = pytz.utc.localize(datetime.datetime(2017, 1, 1, 0, 00, 00))\n",
" spain_dt = transform_dt_to_default_tz(utc_dt)\n",
" self.assertEqual(spain_dt.tzinfo.zone, 'Europe/Madrid')\n",
" self.assertEqual(spain_dt, SPAIN_TZ.localize(datetime.datetime(2017, 1, 1, 1, 0, 0)))\n",
"\n",
" def test_cleaned_internal_recurrence_dates(self):\n",
" self.assertEqual(\n",
" self.cest_schedule.recurrences.rrules[0].until,\n",
" SPAIN_TZ.localize(datetime.datetime(2017, 3, 27, 23, 59, 59)))\n",
"\n",
" def test_CEST_transition(self):\n",
" after = SPAIN_TZ.localize(datetime.datetime(2017, 2, 1, 0, 0, 00))\n",
" before = SPAIN_TZ.localize(datetime.datetime(2017, 11, 30, 0, 0, 00))\n",
"\n",
" dates_between = self.cest_schedule.dates_between(after, before)\n",
"\n",
" expected_dates = (\n",
" SPAIN_TZ.localize(datetime.datetime(2017, 3, 25, 10, 0, 0)),\n",
" SPAIN_TZ.localize(datetime.datetime(2017, 3, 26, 10, 0, 0)),\n",
" SPAIN_TZ.localize(datetime.datetime(2017, 3, 27, 10, 0, 0)),\n",
" )\n",
" self.assertCountEqual(expected_dates, dates_between)\n",
"\n",
" def test_CET_transition(self):\n",
" after = SPAIN_TZ.localize(datetime.datetime(2017, 10, 28, 14, 0, 0))\n",
" before = SPAIN_TZ.localize(datetime.datetime(2017, 10, 30, 14, 0, 0))\n",
"\n",
" dates_between = self.cet_schedule.dates_between(after, before)\n",
"\n",
" expected_dates = (\n",
" SPAIN_TZ.localize(datetime.datetime(2017, 10, 28, 14, 0, 0)),\n",
" SPAIN_TZ.localize(datetime.datetime(2017, 10, 29, 14, 0, 0)),\n",
" SPAIN_TZ.localize(datetime.datetime(2017, 10, 30, 14, 0, 0)),\n",
" )\n",
" self.assertCountEqual(expected_dates, dates_between)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.010101010101010102,
0,
0.008695652173913044,
0.008928571428571428,
0,
0,
0,
0.01020408163265306,
0.010526315789473684,
0,
0.01020408163265306,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016666666666666666
] | 105 | 0.001338 |
import os
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseCsvStationsJsonDistrictsImporter
"""
Define a stub implementation of json importer
district_record_to_dict() may return None
station_record_to_dict() may return None or a list
"""
class Command(BaseCsvStationsJsonDistrictsImporter):
srid = 4326
council_id = 'X01000000'
districts_name = 'test.geojson'
stations_name = 'test.csv'
base_folder_path = os.path.join(os.path.dirname(__file__), '../fixtures/special_cases')
def district_record_to_dict(self, record):
properties = record['properties']
if properties['id'] == 'invalid':
return None
return {
'council': self.council,
'internal_council_id': properties['id'],
'name': properties['name']
}
def station_record_to_dict(self, record):
if record.districts == 'invalid':
return None
location = Point(float(record.lng), float(record.lat), srid=self.get_srid())
stations = []
districts = record.districts.split('/')
for district in districts:
stations.append({
'council': self.council,
'internal_council_id': district,
'postcode': record.postcode,
'address': record.address,
'location': location,
'polling_district_id': district
})
return stations
| [
"import os\n",
"from django.contrib.gis.geos import Point\n",
"from data_collection.management.commands import BaseCsvStationsJsonDistrictsImporter\n",
"\n",
"\n",
"\"\"\"\n",
"Define a stub implementation of json importer\n",
"district_record_to_dict() may return None\n",
"station_record_to_dict() may return None or a list\n",
"\"\"\"\n",
"class Command(BaseCsvStationsJsonDistrictsImporter):\n",
"\n",
" srid = 4326\n",
" council_id = 'X01000000'\n",
" districts_name = 'test.geojson'\n",
" stations_name = 'test.csv'\n",
" base_folder_path = os.path.join(os.path.dirname(__file__), '../fixtures/special_cases')\n",
"\n",
" def district_record_to_dict(self, record):\n",
"\n",
" properties = record['properties']\n",
"\n",
" if properties['id'] == 'invalid':\n",
" return None\n",
"\n",
" return {\n",
" 'council': self.council,\n",
" 'internal_council_id': properties['id'],\n",
" 'name': properties['name']\n",
" }\n",
"\n",
" def station_record_to_dict(self, record):\n",
"\n",
" if record.districts == 'invalid':\n",
" return None\n",
"\n",
" location = Point(float(record.lng), float(record.lat), srid=self.get_srid())\n",
"\n",
" stations = []\n",
" districts = record.districts.split('/')\n",
" for district in districts:\n",
" stations.append({\n",
" 'council': self.council,\n",
" 'internal_council_id': district,\n",
" 'postcode': record.postcode,\n",
" 'address': record.address,\n",
" 'location': location,\n",
" 'polling_district_id': district\n",
" })\n",
"\n",
" return stations\n"
] | [
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0,
0.03571428571428571,
0.02857142857142857,
0.02631578947368421,
0.029411764705882353,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 51 | 0.003398 |
from __future__ import print_function
from twisted.protocols import amp
class Sum(amp.Command):
arguments = [('a', amp.Integer()),
('b', amp.Integer())]
response = [('total', amp.Integer())]
class Divide(amp.Command):
arguments = [('numerator', amp.Integer()),
('denominator', amp.Integer())]
response = [('result', amp.Float())]
errors = {ZeroDivisionError: 'ZERO_DIVISION'}
class Math(amp.AMP):
def sum(self, a, b):
total = a + b
print('Did a sum: %d + %d = %d' % (a, b, total))
return {'total': total}
Sum.responder(sum)
def divide(self, numerator, denominator):
result = float(numerator) / denominator
print('Divided: %d / %d = %f' % (numerator, denominator, result))
return {'result': result}
Divide.responder(divide)
def main():
from twisted.internet import reactor
from twisted.internet.protocol import Factory
pf = Factory()
pf.protocol = Math
reactor.listenTCP(1234, pf)
print('started')
reactor.run()
if __name__ == '__main__':
main()
| [
"from __future__ import print_function\n",
"\n",
"from twisted.protocols import amp\n",
"\n",
"class Sum(amp.Command):\n",
" arguments = [('a', amp.Integer()),\n",
" ('b', amp.Integer())]\n",
" response = [('total', amp.Integer())]\n",
"\n",
"\n",
"class Divide(amp.Command):\n",
" arguments = [('numerator', amp.Integer()),\n",
" ('denominator', amp.Integer())]\n",
" response = [('result', amp.Float())]\n",
" errors = {ZeroDivisionError: 'ZERO_DIVISION'}\n",
"\n",
"\n",
"class Math(amp.AMP):\n",
" def sum(self, a, b):\n",
" total = a + b\n",
" print('Did a sum: %d + %d = %d' % (a, b, total))\n",
" return {'total': total}\n",
" Sum.responder(sum)\n",
"\n",
" def divide(self, numerator, denominator):\n",
" result = float(numerator) / denominator\n",
" print('Divided: %d / %d = %f' % (numerator, denominator, result))\n",
" return {'result': result}\n",
" Divide.responder(divide)\n",
"\n",
"\n",
"def main():\n",
" from twisted.internet import reactor\n",
" from twisted.internet.protocol import Factory\n",
" pf = Factory()\n",
" pf.protocol = Math\n",
" reactor.listenTCP(1234, pf)\n",
" print('started')\n",
" reactor.run()\n",
"\n",
"if __name__ == '__main__':\n",
" main()\n"
] | [
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0
] | 42 | 0.001874 |
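A minimal client-side sketch for the AMP server in the preceding record (illustrative only, not part of the record: it assumes the Sum and Divide command classes are importable from a module named math_server and that the server is listening on localhost:1234):

from __future__ import print_function

from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from twisted.protocols import amp

from math_server import Sum, Divide  # hypothetical module holding the server code above


def do_math():
    # Connect a bare AMP protocol to the server, then issue remote calls over it.
    d = ClientCreator(reactor, amp.AMP).connectTCP('127.0.0.1', 1234)

    def connected(proto):
        # callRemote returns a Deferred firing with the response dict, e.g. {'total': 94}
        return proto.callRemote(Sum, a=13, b=81)

    def summed(result):
        print('Sum result:', result['total'])

    d.addCallback(connected)
    d.addCallback(summed)
    d.addErrback(lambda failure: print(failure))
    d.addBoth(lambda _: reactor.stop())


do_math()
reactor.run()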
# -*- coding: utf-8 -*-
import os
import numexpr
from openfisca_core.indexed_enums import EnumArray
def assert_near(value, target_value, absolute_error_margin = None, message = '', relative_error_margin = None):
'''
:param value: Value returned by the test
:param target_value: Value that the test should return to pass
:param absolute_error_margin: Absolute error margin authorized
:param message: Error message to be displayed if the test fails
:param relative_error_margin: Relative error margin authorized
Limit : This function cannot be used to assert near periods.
'''
import numpy as np
if absolute_error_margin is None and relative_error_margin is None:
absolute_error_margin = 0
if not isinstance(value, np.ndarray):
value = np.array(value)
if isinstance(value, EnumArray):
return assert_enum_equals(value, target_value, message)
if np.issubdtype(value.dtype, np.datetime64):
target_value = np.array(target_value, dtype = value.dtype)
assert_datetime_equals(value, target_value, message)
if isinstance(target_value, str):
target_value = eval_expression(target_value)
target_value = np.array(target_value).astype(np.float32)
value = np.array(value).astype(np.float32)
diff = abs(target_value - value)
if absolute_error_margin is not None:
assert (diff <= absolute_error_margin).all(), \
'{}{} differs from {} with an absolute margin {} > {}'.format(message, value, target_value,
diff, absolute_error_margin)
if relative_error_margin is not None:
assert (diff <= abs(relative_error_margin * target_value)).all(), \
'{}{} differs from {} with a relative margin {} > {}'.format(message, value, target_value,
diff, abs(relative_error_margin * target_value))
def assert_datetime_equals(value, target_value, message = ''):
assert (value == target_value).all(), '{}{} differs from {}.'.format(message, value, target_value)
def assert_enum_equals(value, target_value, message = ''):
value = value.decode_to_str()
assert (value == target_value).all(), '{}{} differs from {}.'.format(message, value, target_value)
def indent(text):
return " {}".format(text.replace(os.linesep, "{} ".format(os.linesep)))
def get_trace_tool_link(scenario, variables, api_url, trace_tool_url):
import json
import urllib
scenario_json = scenario.to_json()
simulation_json = {
'scenarios': [scenario_json],
'variables': variables,
}
url = trace_tool_url + '?' + urllib.urlencode({
'simulation': json.dumps(simulation_json),
'api_url': api_url,
})
return url
def eval_expression(expression):
try:
return numexpr.evaluate(expression)
except (KeyError, TypeError):
return expression
| [
"# -*- coding: utf-8 -*-\n",
"\n",
"\n",
"import os\n",
"\n",
"import numexpr\n",
"\n",
"from openfisca_core.indexed_enums import EnumArray\n",
"\n",
"\n",
"def assert_near(value, target_value, absolute_error_margin = None, message = '', relative_error_margin = None):\n",
" '''\n",
"\n",
" :param value: Value returned by the test\n",
" :param target_value: Value that the test should return to pass\n",
" :param absolute_error_margin: Absolute error margin authorized\n",
" :param message: Error message to be displayed if the test fails\n",
" :param relative_error_margin: Relative error margin authorized\n",
"\n",
" Limit : This function cannot be used to assert near periods.\n",
"\n",
" '''\n",
"\n",
" import numpy as np\n",
"\n",
" if absolute_error_margin is None and relative_error_margin is None:\n",
" absolute_error_margin = 0\n",
" if not isinstance(value, np.ndarray):\n",
" value = np.array(value)\n",
" if isinstance(value, EnumArray):\n",
" return assert_enum_equals(value, target_value, message)\n",
" if np.issubdtype(value.dtype, np.datetime64):\n",
" target_value = np.array(target_value, dtype = value.dtype)\n",
" assert_datetime_equals(value, target_value, message)\n",
" if isinstance(target_value, str):\n",
" target_value = eval_expression(target_value)\n",
"\n",
" target_value = np.array(target_value).astype(np.float32)\n",
"\n",
" value = np.array(value).astype(np.float32)\n",
" diff = abs(target_value - value)\n",
" if absolute_error_margin is not None:\n",
" assert (diff <= absolute_error_margin).all(), \\\n",
" '{}{} differs from {} with an absolute margin {} > {}'.format(message, value, target_value,\n",
" diff, absolute_error_margin)\n",
" if relative_error_margin is not None:\n",
" assert (diff <= abs(relative_error_margin * target_value)).all(), \\\n",
" '{}{} differs from {} with a relative margin {} > {}'.format(message, value, target_value,\n",
" diff, abs(relative_error_margin * target_value))\n",
"\n",
"\n",
"def assert_datetime_equals(value, target_value, message = ''):\n",
" assert (value == target_value).all(), '{}{} differs from {}.'.format(message, value, target_value)\n",
"\n",
"\n",
"def assert_enum_equals(value, target_value, message = ''):\n",
" value = value.decode_to_str()\n",
" assert (value == target_value).all(), '{}{} differs from {}.'.format(message, value, target_value)\n",
"\n",
"\n",
"def indent(text):\n",
" return \" {}\".format(text.replace(os.linesep, \"{} \".format(os.linesep)))\n",
"\n",
"\n",
"def get_trace_tool_link(scenario, variables, api_url, trace_tool_url):\n",
" import json\n",
" import urllib\n",
"\n",
" scenario_json = scenario.to_json()\n",
" simulation_json = {\n",
" 'scenarios': [scenario_json],\n",
" 'variables': variables,\n",
" }\n",
" url = trace_tool_url + '?' + urllib.urlencode({\n",
" 'simulation': json.dumps(simulation_json),\n",
" 'api_url': api_url,\n",
" })\n",
" return url\n",
"\n",
"\n",
"def eval_expression(expression):\n",
" try:\n",
" return numexpr.evaluate(expression)\n",
" except (KeyError, TypeError):\n",
" return expression\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029850746268656716,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0.022222222222222223,
0,
0,
0.009708737864077669,
0.015384615384615385,
0,
0,
0.031746031746031744,
0.009708737864077669,
0,
0,
0.03389830508474576,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 85 | 0.002757 |
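A small usage sketch for the assert_near helper in the preceding record (illustrative only; the import path is an assumption and the numbers are made up):

import numpy as np

from openfisca_core.tools import assert_near  # assumed import path for the helpers above

# Passes: every element is within the absolute margin of its target.
assert_near(np.array([100.0, 200.0]), [100.0005, 200.0005], absolute_error_margin=0.001)

# Passes: 101 differs from the target 100 by 1, which is within the 2% relative margin.
assert_near(101.0, 100.0, relative_error_margin=0.02)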
# Copyright 2013 B1 Systems GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import tabs
from horizon.utils import functions as utils
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.hypervisors \
import tables as project_tables
from openstack_dashboard.dashboards.admin.hypervisors \
import tabs as project_tabs
class AdminIndexView(tabs.TabbedTableView):
tab_group_class = project_tabs.HypervisorHostTabs
template_name = 'admin/hypervisors/index.html'
def get_data(self):
hypervisors = []
try:
hypervisors = api.nova.hypervisor_list(self.request)
hypervisors.sort(key=utils.natural_sort('hypervisor_hostname'))
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve hypervisor information.'))
return hypervisors
def get_context_data(self, **kwargs):
context = super(AdminIndexView, self).get_context_data(**kwargs)
try:
context["stats"] = api.nova.hypervisor_stats(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve hypervisor statistics.'))
return context
class AdminDetailView(tables.DataTableView):
table_class = project_tables.AdminHypervisorInstancesTable
template_name = 'admin/hypervisors/detail.html'
def get_data(self):
instances = []
try:
result = api.nova.hypervisor_search(self.request,
self.kwargs['hypervisor'])
for hypervisor in result:
try:
instances += hypervisor.servers
except AttributeError:
pass
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve hypervisor instances list.'))
return instances
| [
"# Copyright 2013 B1 Systems GmbH\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"from django.utils.translation import ugettext_lazy as _\n",
"\n",
"from horizon import exceptions\n",
"from horizon import tables\n",
"from horizon import tabs\n",
"from horizon.utils import functions as utils\n",
"\n",
"from openstack_dashboard import api\n",
"from openstack_dashboard.dashboards.admin.hypervisors \\\n",
" import tables as project_tables\n",
"from openstack_dashboard.dashboards.admin.hypervisors \\\n",
" import tabs as project_tabs\n",
"\n",
"\n",
"class AdminIndexView(tabs.TabbedTableView):\n",
" tab_group_class = project_tabs.HypervisorHostTabs\n",
" template_name = 'admin/hypervisors/index.html'\n",
"\n",
" def get_data(self):\n",
" hypervisors = []\n",
" try:\n",
" hypervisors = api.nova.hypervisor_list(self.request)\n",
" hypervisors.sort(key=utils.natural_sort('hypervisor_hostname'))\n",
" except Exception:\n",
" exceptions.handle(self.request,\n",
" _('Unable to retrieve hypervisor information.'))\n",
"\n",
" return hypervisors\n",
"\n",
" def get_context_data(self, **kwargs):\n",
" context = super(AdminIndexView, self).get_context_data(**kwargs)\n",
" try:\n",
" context[\"stats\"] = api.nova.hypervisor_stats(self.request)\n",
" except Exception:\n",
" exceptions.handle(self.request,\n",
" _('Unable to retrieve hypervisor statistics.'))\n",
"\n",
" return context\n",
"\n",
"\n",
"class AdminDetailView(tables.DataTableView):\n",
" table_class = project_tables.AdminHypervisorInstancesTable\n",
" template_name = 'admin/hypervisors/detail.html'\n",
"\n",
" def get_data(self):\n",
" instances = []\n",
" try:\n",
" result = api.nova.hypervisor_search(self.request,\n",
" self.kwargs['hypervisor'])\n",
" for hypervisor in result:\n",
" try:\n",
" instances += hypervisor.servers\n",
" except AttributeError:\n",
" pass\n",
" except Exception:\n",
" exceptions.handle(self.request,\n",
" _('Unable to retrieve hypervisor instances list.'))\n",
" return instances\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015384615384615385,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014705882352941176,
0
] | 72 | 0.000635 |
# -*- coding: utf-8 -*-
import struct
from math import asin, atan, cos, exp, log, pi, sin, sqrt, tan
from colorama import init
from networkx.algorithms.clique import find_cliques
import networkx as nx
import numpy as np
from datetime import datetime as dt, timedelta
init()
TIME_PERIODS = (
(60, 'minute'),
(3600, 'hour'),
(86400, 'day'),
(86400*7, 'week')
)
FORT_CACHE = {}
def fort_details(bot, fort_id, latitude, longitude):
"""
Lookup fort metadata and (if possible) serve from cache.
"""
if fort_id not in FORT_CACHE:
"""
Lookup the fort details and cache the response for future use.
"""
request = bot.api.create_request()
request.fort_details(fort_id=fort_id, latitude=latitude, longitude=longitude)
try:
response_dict = request.call()
FORT_CACHE[fort_id] = response_dict['responses']['FORT_DETAILS']
except Exception:
pass
# Just to avoid KeyErrors
return FORT_CACHE.get(fort_id, {})
def encode(cellid):
output = []
encoder._VarintEncoder()(output.append, cellid)
return ''.join(output)
def distance(lat1, lon1, lat2, lon2):
p = 0.017453292519943295
a = 0.5 - cos((lat2 - lat1) * p) / 2 + cos(lat1 * p) * \
cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2
return 12742 * asin(sqrt(a)) * 1000
def convert(distance, from_unit, to_unit): # Converts units
# Example of converting distance from meters to feet:
# convert(100.0,"m","ft")
conversions = {
"mm": {"mm": 1.0,
"cm": 1.0 / 10.0,
"m": 1.0 / 1000.0,
"km": 1.0 / 1000000,
"ft": 0.00328084,
"yd": 0.00109361,
"mi": 1.0 / 1609340.0007802},
"cm": {"mm": 10.0,
"cm": 1.0,
"m": 1.0 / 100,
"km": 1.0 / 100000,
"ft": 0.0328084,
"yd": 0.0109361,
"mi": 1.0 / 160934.0},
"m": {"mm": 1000,
"cm": 100.0,
"m": 1.0,
"km": 1.0 / 1000.0,
"ft": 3.28084,
"yd": 1.09361,
"mi": 1.0 / 1609.34},
"km": {"mm": 100000,
"cm": 10000.0,
"m": 1000.0,
"km": 1.0,
"ft": 3280.84,
"yd": 1093.61,
"mi": 1.0 / 1.60934},
"ft": {"mm": 1.0 / 328.084,
"cm": 1.0 / 32.8084,
"m": 1.0 / 3.28084,
"km": 1 / 3280.84,
"ft": 1.0,
"yd": 1.0 / 3.0,
"mi": 1.0 / 5280.0},
"yd": {"mm": 1.0 / 328.084,
"cm": 1.0 / 32.8084,
"m": 1.0 / 3.28084,
"km": 1 / 1093.61,
"ft": 3.0,
"yd": 1.0,
"mi": 1.0 / 1760.0},
"mi": {"mm": 1609340.0007802,
"cm": 160934.0,
"m": 1609.34,
"km": 1.60934,
"ft": 5280.0,
"yd": 1760.0,
"mi": 1.0}
}
return distance * conversions[from_unit][to_unit]
def dist_to_str(distance, unit, append_unit = True):
if append_unit:
return '{:.2f}{}'.format(distance, unit)
else:
return '{:.2f}'.format(distance)
def format_dist(distance, unit, append_unit = True):
# Assumes that distance is in meters and converts it to the given unit, then a formatted string is returned
# Ex: format_dist(1500, 'km') returns the string "1.5km"
return dist_to_str(convert(distance, 'm', unit), unit, append_unit)
def getSeconds(strTime):
'''
Return the duration in seconds of a time string
:param strTime: string time of format %H:%M:%S
'''
try:
x = dt.strptime(strTime, '%H:%M:%S')
seconds = int(timedelta(hours=x.hour,minutes=x.minute,seconds=x.second).total_seconds())
except ValueError:
seconds = 0;
if seconds < 0:
seconds = 0;
return seconds
def format_time(seconds):
# Return a string displaying the time given as seconds or minutes
num, duration = 0, long(round(seconds))
runtime = []
for period, unit in TIME_PERIODS[::-1]:
num, duration = divmod(duration, period)
if num:
p = '{0}{1}'.format(unit, 's'*(num!=1))
runtime.append('{0} {1}'.format(num, p))
runtime.append('{0} second{1}'.format(duration, 's'*(duration!=1)))
return ', '.join(runtime)
def i2f(int):
return struct.unpack('<d', struct.pack('<Q', int))[0]
def print_green(message):
print(u'\033[92m' + message.decode('utf-8') + '\033[0m')
def print_yellow(message):
print(u'\033[93m' + message.decode('utf-8') + '\033[0m')
def print_red(message):
print(u'\033[91m' + message.decode('utf-8') + '\033[0m')
def float_equal(f1, f2, epsilon=1e-8):
if f1 > f2:
return f1 - f2 < epsilon
if f2 > f1:
return f2 - f1 < epsilon
return True
# pseudo mercator projection
EARTH_RADIUS_MAJ = 6378137.0
EARTH_RADIUS_MIN = 6356752.3142
RATIO = (EARTH_RADIUS_MIN / EARTH_RADIUS_MAJ)
ECCENT = sqrt(1.0 - RATIO**2)
COM = 0.5 * ECCENT
def coord2merc(lat, lng):
return lng2x(lng), lat2y(lat)
def merc2coord(vec):
return y2lat(vec[1]), x2lng(vec[0])
def y2lat(y):
ts = exp(-y / EARTH_RADIUS_MAJ)
phi = pi / 2.0 - 2 * atan(ts)
dphi = 1.0
for i in range(15):
if abs(dphi) < 0.000000001:
break
con = ECCENT * sin(phi)
dphi = pi / 2.0 - 2 * atan (ts * pow((1.0 - con) / (1.0 + con), COM)) - phi
phi += dphi
return rad2deg(phi)
def lat2y(lat):
lat = min(89.5, max(lat, -89.5))
phi = deg2rad(lat)
sinphi = sin(phi)
con = ECCENT * sinphi
con = pow((1.0 - con) / (1.0 + con), COM)
ts = tan(0.5 * (pi * 0.5 - phi)) / con
return 0 - EARTH_RADIUS_MAJ * log(ts)
def x2lng(x):
return rad2deg(x) / EARTH_RADIUS_MAJ
def lng2x(lng):
return EARTH_RADIUS_MAJ * deg2rad(lng);
def deg2rad(deg):
return deg * pi / 180.0
def rad2deg(rad):
return rad * 180.0 / pi
def find_biggest_cluster(radius, points, order=None):
graph = nx.Graph()
for point in points:
if order is '9QM=':
#is a lure module - 9QM=
now = int(time.time())
remaining = now - point['last_modified_timestamp_ms']
f = point['latitude'], point['longitude'], remaining
else:
f = point['latitude'], point['longitude'], 0
graph.add_node(f)
for node in graph.nodes():
if node != f and distance(f[0], f[1], node[0], node[1]) <= radius*2:
graph.add_edge(f, node)
cliques = list(find_cliques(graph))
if len(cliques) > 0:
max_clique = max(list(find_cliques(graph)), key=lambda l: (len(l), sum(x[2] for x in l)))
merc_clique = [coord2merc(x[0], x[1]) for x in max_clique]
clique_x, clique_y = zip(*merc_clique)
best_point = np.mean(clique_x), np.mean(clique_y)
best_coord = merc2coord(best_point)
return {'latitude': best_coord[0], 'longitude': best_coord[1], 'num_points': len(max_clique)}
else:
return None
| [
"# -*- coding: utf-8 -*-\n",
"\n",
"import struct\n",
"from math import asin, atan, cos, exp, log, pi, sin, sqrt, tan\n",
"\n",
"from colorama import init\n",
"from networkx.algorithms.clique import find_cliques\n",
"\n",
"import networkx as nx\n",
"import numpy as np\n",
"\n",
"from datetime import datetime as dt, timedelta\n",
"\n",
"init()\n",
"\n",
"TIME_PERIODS = (\n",
" (60, 'minute'),\n",
" (3600, 'hour'),\n",
" (86400, 'day'),\n",
" (86400*7, 'week')\n",
")\n",
"\n",
"FORT_CACHE = {}\n",
"def fort_details(bot, fort_id, latitude, longitude):\n",
" \"\"\"\n",
" Lookup fort metadata and (if possible) serve from cache.\n",
" \"\"\"\n",
"\n",
" if fort_id not in FORT_CACHE:\n",
" \"\"\"\n",
" Lookup the fort details and cache the response for future use.\n",
" \"\"\"\n",
" request = bot.api.create_request()\n",
" request.fort_details(fort_id=fort_id, latitude=latitude, longitude=longitude)\n",
" try:\n",
" response_dict = request.call()\n",
" FORT_CACHE[fort_id] = response_dict['responses']['FORT_DETAILS']\n",
" except Exception:\n",
" pass\n",
"\n",
" # Just to avoid KeyErrors\n",
" return FORT_CACHE.get(fort_id, {})\n",
"\n",
"def encode(cellid):\n",
" output = []\n",
" encoder._VarintEncoder()(output.append, cellid)\n",
" return ''.join(output)\n",
"\n",
"\n",
"def distance(lat1, lon1, lat2, lon2):\n",
" p = 0.017453292519943295\n",
" a = 0.5 - cos((lat2 - lat1) * p) / 2 + cos(lat1 * p) * \\\n",
" cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2\n",
" return 12742 * asin(sqrt(a)) * 1000\n",
"\n",
"\n",
"def convert(distance, from_unit, to_unit): # Converts units\n",
" # Example of converting distance from meters to feet:\n",
" # convert(100.0,\"m\",\"ft\")\n",
" conversions = {\n",
" \"mm\": {\"mm\": 1.0,\n",
" \"cm\": 1.0 / 10.0,\n",
" \"m\": 1.0 / 1000.0,\n",
" \"km\": 1.0 / 1000000,\n",
" \"ft\": 0.00328084,\n",
" \"yd\": 0.00109361,\n",
" \"mi\": 1.0 / 1609340.0007802},\n",
" \"cm\": {\"mm\": 10.0,\n",
" \"cm\": 1.0,\n",
" \"m\": 1.0 / 100,\n",
" \"km\": 1.0 / 100000,\n",
" \"ft\": 0.0328084,\n",
" \"yd\": 0.0109361,\n",
" \"mi\": 1.0 / 160934.0},\n",
" \"m\": {\"mm\": 1000,\n",
" \"cm\": 100.0,\n",
" \"m\": 1.0,\n",
" \"km\": 1.0 / 1000.0,\n",
" \"ft\": 3.28084,\n",
" \"yd\": 1.09361,\n",
" \"mi\": 1.0 / 1609.34},\n",
" \"km\": {\"mm\": 100000,\n",
" \"cm\": 10000.0,\n",
" \"m\": 1000.0,\n",
" \"km\": 1.0,\n",
" \"ft\": 3280.84,\n",
" \"yd\": 1093.61,\n",
" \"mi\": 1.0 / 1.60934},\n",
" \"ft\": {\"mm\": 1.0 / 328.084,\n",
" \"cm\": 1.0 / 32.8084,\n",
" \"m\": 1.0 / 3.28084,\n",
" \"km\": 1 / 3280.84,\n",
" \"ft\": 1.0,\n",
" \"yd\": 1.0 / 3.0,\n",
" \"mi\": 1.0 / 5280.0},\n",
" \"yd\": {\"mm\": 1.0 / 328.084,\n",
" \"cm\": 1.0 / 32.8084,\n",
" \"m\": 1.0 / 3.28084,\n",
" \"km\": 1 / 1093.61,\n",
" \"ft\": 3.0,\n",
" \"yd\": 1.0,\n",
" \"mi\": 1.0 / 1760.0},\n",
" \"mi\": {\"mm\": 1609340.0007802,\n",
" \"cm\": 160934.0,\n",
" \"m\": 1609.34,\n",
" \"km\": 1.60934,\n",
" \"ft\": 5280.0,\n",
" \"yd\": 1760.0,\n",
" \"mi\": 1.0}\n",
" }\n",
" return distance * conversions[from_unit][to_unit]\n",
"\n",
"\n",
"def dist_to_str(distance, unit, append_unit = True):\n",
" if append_unit:\n",
" return '{:.2f}{}'.format(distance, unit)\n",
" else:\n",
" return '{:.2f}'.format(distance)\n",
"\n",
"\n",
"def format_dist(distance, unit, append_unit = True):\n",
" # Assumes that distance is in meters and converts it to the given unit, then a formatted string is returned\n",
" # Ex: format_dist(1500, 'km') returns the string \"1.5km\"\n",
" return dist_to_str(convert(distance, 'm', unit), unit, append_unit)\n",
"\n",
"\n",
"def getSeconds(strTime):\n",
" '''\n",
" Return the duration in seconds of a time string\n",
" :param strTime: string time of format %H:%M:%S\n",
" '''\n",
" try:\n",
" x = dt.strptime(strTime, '%H:%M:%S')\n",
" seconds = int(timedelta(hours=x.hour,minutes=x.minute,seconds=x.second).total_seconds())\n",
" except ValueError: \n",
" seconds = 0;\n",
" \n",
" if seconds < 0:\n",
" seconds = 0;\n",
" \n",
" return seconds\n",
" \n",
"def format_time(seconds):\n",
" # Return a string displaying the time given as seconds or minutes\n",
" num, duration = 0, long(round(seconds))\n",
" runtime = []\n",
" for period, unit in TIME_PERIODS[::-1]:\n",
" num, duration = divmod(duration, period)\n",
" if num:\n",
" p = '{0}{1}'.format(unit, 's'*(num!=1))\n",
" runtime.append('{0} {1}'.format(num, p))\n",
"\n",
" runtime.append('{0} second{1}'.format(duration, 's'*(duration!=1)))\n",
"\n",
" return ', '.join(runtime)\n",
"\n",
"\n",
"def i2f(int):\n",
" return struct.unpack('<d', struct.pack('<Q', int))[0]\n",
"\n",
"\n",
"def print_green(message):\n",
" print(u'\\033[92m' + message.decode('utf-8') + '\\033[0m')\n",
"\n",
"\n",
"def print_yellow(message):\n",
" print(u'\\033[93m' + message.decode('utf-8') + '\\033[0m')\n",
"\n",
"\n",
"def print_red(message):\n",
" print(u'\\033[91m' + message.decode('utf-8') + '\\033[0m')\n",
"\n",
"\n",
"def float_equal(f1, f2, epsilon=1e-8):\n",
" if f1 > f2:\n",
" return f1 - f2 < epsilon\n",
" if f2 > f1:\n",
" return f2 - f1 < epsilon\n",
" return True\n",
"\n",
"\n",
"# pseudo mercator projection\n",
"EARTH_RADIUS_MAJ = 6378137.0\n",
"EARTH_RADIUS_MIN = 6356752.3142\n",
"RATIO = (EARTH_RADIUS_MIN / EARTH_RADIUS_MAJ)\n",
"ECCENT = sqrt(1.0 - RATIO**2)\n",
"COM = 0.5 * ECCENT\n",
"\n",
"\n",
"def coord2merc(lat, lng):\n",
" return lng2x(lng), lat2y(lat)\n",
"\n",
"\n",
"def merc2coord(vec):\n",
" return y2lat(vec[1]), x2lng(vec[0])\n",
"\n",
"\n",
"def y2lat(y):\n",
" ts = exp(-y / EARTH_RADIUS_MAJ)\n",
" phi = pi / 2.0 - 2 * atan(ts)\n",
" dphi = 1.0\n",
" for i in range(15):\n",
" if abs(dphi) < 0.000000001:\n",
" break\n",
" con = ECCENT * sin(phi)\n",
" dphi = pi / 2.0 - 2 * atan (ts * pow((1.0 - con) / (1.0 + con), COM)) - phi\n",
" phi += dphi\n",
" return rad2deg(phi)\n",
"\n",
"\n",
"def lat2y(lat):\n",
" lat = min(89.5, max(lat, -89.5))\n",
" phi = deg2rad(lat)\n",
" sinphi = sin(phi)\n",
" con = ECCENT * sinphi\n",
" con = pow((1.0 - con) / (1.0 + con), COM)\n",
" ts = tan(0.5 * (pi * 0.5 - phi)) / con\n",
" return 0 - EARTH_RADIUS_MAJ * log(ts)\n",
"\n",
"\n",
"def x2lng(x):\n",
" return rad2deg(x) / EARTH_RADIUS_MAJ\n",
"\n",
"\n",
"def lng2x(lng):\n",
" return EARTH_RADIUS_MAJ * deg2rad(lng);\n",
"\n",
"\n",
"def deg2rad(deg):\n",
" return deg * pi / 180.0\n",
"\n",
"\n",
"def rad2deg(rad):\n",
" return rad * 180.0 / pi\n",
"\n",
"\n",
"def find_biggest_cluster(radius, points, order=None):\n",
" graph = nx.Graph()\n",
" for point in points:\n",
" if order is '9QM=':\n",
" #is a lure module - 9QM=\n",
" now = int(time.time())\n",
" remaining = now - point['last_modified_timestamp_ms']\n",
" f = point['latitude'], point['longitude'], remaining\n",
" else:\n",
" f = point['latitude'], point['longitude'], 0\n",
" graph.add_node(f)\n",
" for node in graph.nodes():\n",
" if node != f and distance(f[0], f[1], node[0], node[1]) <= radius*2:\n",
" graph.add_edge(f, node)\n",
" cliques = list(find_cliques(graph))\n",
" if len(cliques) > 0:\n",
" max_clique = max(list(find_cliques(graph)), key=lambda l: (len(l), sum(x[2] for x in l)))\n",
" merc_clique = [coord2merc(x[0], x[1]) for x in max_clique]\n",
" clique_x, clique_y = zip(*merc_clique)\n",
" best_point = np.mean(clique_x), np.mean(clique_y)\n",
" best_coord = merc2coord(best_point)\n",
" return {'latitude': best_coord[0], 'longitude': best_coord[1], 'num_points': len(max_clique)}\n",
" else:\n",
" return None\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03773584905660377,
0,
0,
0,
0,
0,
0,
0.03773584905660377,
0.008928571428571428,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030927835051546393,
0.041666666666666664,
0.047619047619047616,
0.1111111111111111,
0,
0.047619047619047616,
0.09090909090909091,
0,
0.2,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0.013888888888888888,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142,
0,
0.07142857142857142,
0,
0.07142857142857142,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0.00980392156862745,
0,
0
] | 260 | 0.004441 |
from dolfin import *
from dolfin_adjoint import *
n = 30
mesh = UnitSquareMesh(n, n)
V = VectorFunctionSpace(mesh, "CG", 2)
ic = project(Expression(("sin(2*pi*x[0])", "cos(2*pi*x[1])"), degree=2), V)
def main(nu):
u = ic.copy(deepcopy=True)
u_next = Function(V)
v = TestFunction(V)
timestep = Constant(0.01)
F = (inner((u_next - u)/timestep, v)
+ inner(grad(u_next)*u_next, v)
+ nu*inner(grad(u_next), grad(v)))*dx
bc = DirichletBC(V, (0.0, 0.0), "on_boundary")
t = 0.0
end = 0.1
while (t <= end):
solve(F == 0, u_next, bc)
u.assign(u_next)
t += float(timestep)
return u
if __name__ == "__main__":
nu = Constant(0.0001, name="nu")
u = main(nu)
J = Functional(inner(u, u)*dx*dt[FINISH_TIME])
dJdnu = compute_gradient(J, ConstantControl("nu"))
Jnu = assemble(inner(u, u)*dx) # current value
parameters["adjoint"]["stop_annotating"] = True # stop registering equations
def Jhat(nu): # the functional as a pure function of nu
u = main(nu)
return assemble(inner(u, u)*dx)
conv_rate = taylor_test(Jhat, ConstantControl("nu"), Jnu, dJdnu)
| [
"from dolfin import *\n",
"from dolfin_adjoint import *\n",
"\n",
"n = 30\n",
"mesh = UnitSquareMesh(n, n)\n",
"V = VectorFunctionSpace(mesh, \"CG\", 2)\n",
"\n",
"ic = project(Expression((\"sin(2*pi*x[0])\", \"cos(2*pi*x[1])\"), degree=2), V)\n",
"\n",
"def main(nu):\n",
" u = ic.copy(deepcopy=True)\n",
" u_next = Function(V)\n",
" v = TestFunction(V)\n",
"\n",
" timestep = Constant(0.01)\n",
"\n",
" F = (inner((u_next - u)/timestep, v)\n",
" + inner(grad(u_next)*u_next, v)\n",
" + nu*inner(grad(u_next), grad(v)))*dx\n",
"\n",
" bc = DirichletBC(V, (0.0, 0.0), \"on_boundary\")\n",
"\n",
" t = 0.0\n",
" end = 0.1\n",
" while (t <= end):\n",
" solve(F == 0, u_next, bc)\n",
" u.assign(u_next)\n",
" t += float(timestep)\n",
"\n",
" return u\n",
"\n",
"if __name__ == \"__main__\":\n",
" nu = Constant(0.0001, name=\"nu\")\n",
" u = main(nu)\n",
"\n",
" J = Functional(inner(u, u)*dx*dt[FINISH_TIME])\n",
" dJdnu = compute_gradient(J, ConstantControl(\"nu\"))\n",
"\n",
" Jnu = assemble(inner(u, u)*dx) # current value\n",
"\n",
" parameters[\"adjoint\"][\"stop_annotating\"] = True # stop registering equations\n",
"\n",
" def Jhat(nu): # the functional as a pure function of nu\n",
" u = main(nu)\n",
" return assemble(inner(u, u)*dx)\n",
"\n",
" conv_rate = taylor_test(Jhat, ConstantControl(\"nu\"), Jnu, dJdnu)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0,
0.024691358024691357,
0,
0.016666666666666666,
0,
0,
0,
0
] | 47 | 0.004623 |
# Copyright 2013 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import netaddr
from neutron_lib import constants
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import testscenarios
from webob import exc
from neutron.common import utils
from neutron import context as nctx
from neutron.db import api as db_api
from neutron.db import l3_db
from neutron.db import l3_gwmode_db
from neutron.db.models import external_net as ext_net_models
from neutron.db.models import l3 as l3_models
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.extensions import l3_ext_gw_mode
from neutron import manager
from neutron.objects import network as net_obj
from neutron.objects import subnet as subnet_obj
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
FAKE_GW_PORT_ID = _uuid()
FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff'
FAKE_FIP_EXT_PORT_ID = _uuid()
FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66'
FAKE_FIP_INT_PORT_ID = _uuid()
FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa'
FAKE_ROUTER_PORT_ID = _uuid()
FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb'
class TestExtensionManager(object):
def get_resources(self):
# Simulate extension of L3 attribute map
for key in l3.RESOURCE_ATTRIBUTE_MAP.keys():
l3.RESOURCE_ATTRIBUTE_MAP[key].update(
l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
return l3.L3.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
# A simple class for making a concrete class out of the mixin
# for the case of a plugin that integrates l3 routing.
class TestDbIntPlugin(test_l3.TestL3NatIntPlugin,
l3_gwmode_db.L3_NAT_db_mixin):
supported_extension_aliases = ["external-net", "router", "ext-gw-mode"]
# A simple class for making a concrete class out of the mixin
# for the case of a l3 router service plugin
class TestDbSepPlugin(test_l3.TestL3NatServicePlugin,
l3_gwmode_db.L3_NAT_db_mixin):
supported_extension_aliases = ["router", "ext-gw-mode"]
class TestGetEnableSnat(testscenarios.WithScenarios, base.BaseTestCase):
scenarios = [
('enabled', {'enable_snat_by_default': True}),
('disabled', {'enable_snat_by_default': False})]
def setUp(self):
super(TestGetEnableSnat, self).setUp()
self.config(enable_snat_by_default=self.enable_snat_by_default)
def _test_get_enable_snat(self, expected, info):
observed = l3_gwmode_db.L3_NAT_dbonly_mixin._get_enable_snat(info)
self.assertEqual(expected, observed)
def test_get_enable_snat_without_gw_info(self):
self._test_get_enable_snat(self.enable_snat_by_default, {})
def test_get_enable_snat_without_enable_snat(self):
info = {'network_id': _uuid()}
self._test_get_enable_snat(self.enable_snat_by_default, info)
def test_get_enable_snat_with_snat_enabled(self):
self._test_get_enable_snat(True, {'enable_snat': True})
def test_get_enable_snat_with_snat_disabled(self):
self._test_get_enable_snat(False, {'enable_snat': False})
class TestL3GwModeMixin(testlib_api.SqlTestCase):
def setUp(self):
super(TestL3GwModeMixin, self).setUp()
plugin = __name__ + '.' + TestDbIntPlugin.__name__
self.setup_coreplugin(plugin)
self.target_object = TestDbIntPlugin()
# Patch the context
ctx_patcher = mock.patch('neutron.context', autospec=True)
mock_context = ctx_patcher.start()
self.context = mock_context.get_admin_context()
# This ensure also calls to elevated work in unit tests
self.context.elevated.return_value = self.context
self.context.session = db_api.get_session()
# Create sample data for tests
self.ext_net_id = _uuid()
self.int_net_id = _uuid()
self.int_sub_id = _uuid()
self.tenant_id = 'the_tenant'
self.network = net_obj.Network(
self.context,
id=self.ext_net_id,
project_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE)
self.net_ext = ext_net_models.ExternalNetwork(
network_id=self.ext_net_id)
self.network.create()
self.context.session.add(self.net_ext)
self.router = l3_models.Router(
id=_uuid(),
name=None,
tenant_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE,
enable_snat=True,
gw_port_id=None)
self.context.session.add(self.router)
self.context.session.flush()
self.router_gw_port = models_v2.Port(
id=FAKE_GW_PORT_ID,
tenant_id=self.tenant_id,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
admin_state_up=True,
status=constants.PORT_STATUS_ACTIVE,
mac_address=FAKE_GW_PORT_MAC,
network_id=self.ext_net_id)
self.router.gw_port_id = self.router_gw_port.id
self.context.session.add(self.router)
self.context.session.add(self.router_gw_port)
self.context.session.flush()
self.fip_ext_port = models_v2.Port(
id=FAKE_FIP_EXT_PORT_ID,
tenant_id=self.tenant_id,
admin_state_up=True,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
status=constants.PORT_STATUS_ACTIVE,
mac_address=FAKE_FIP_EXT_PORT_MAC,
network_id=self.ext_net_id)
self.context.session.add(self.fip_ext_port)
self.context.session.flush()
self.int_net = net_obj.Network(
self.context,
id=self.int_net_id,
project_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE)
self.int_sub = subnet_obj.Subnet(self.context,
id=self.int_sub_id,
project_id=self.tenant_id,
ip_version=4,
cidr=utils.AuthenticIPNetwork('3.3.3.0/24'),
gateway_ip=netaddr.IPAddress('3.3.3.1'),
network_id=self.int_net_id)
self.router_port = models_v2.Port(
id=FAKE_ROUTER_PORT_ID,
tenant_id=self.tenant_id,
admin_state_up=True,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
status=constants.PORT_STATUS_ACTIVE,
mac_address=FAKE_ROUTER_PORT_MAC,
network_id=self.int_net_id)
self.router_port_ip_info = models_v2.IPAllocation(
port_id=self.router_port.id,
network_id=self.int_net.id,
subnet_id=self.int_sub_id,
ip_address='3.3.3.1')
self.int_net.create()
self.int_sub.create()
self.context.session.add(self.router_port)
self.context.session.add(self.router_port_ip_info)
self.context.session.flush()
self.fip_int_port = models_v2.Port(
id=FAKE_FIP_INT_PORT_ID,
tenant_id=self.tenant_id,
admin_state_up=True,
device_id='something',
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova',
status=constants.PORT_STATUS_ACTIVE,
mac_address=FAKE_FIP_INT_PORT_MAC,
network_id=self.int_net_id)
self.fip_int_ip_info = models_v2.IPAllocation(
port_id=self.fip_int_port.id,
network_id=self.int_net.id,
subnet_id=self.int_sub_id,
ip_address='3.3.3.3')
self.fip = l3_models.FloatingIP(
id=_uuid(),
floating_ip_address='1.1.1.2',
floating_network_id=self.ext_net_id,
floating_port_id=FAKE_FIP_EXT_PORT_ID,
fixed_port_id=None,
fixed_ip_address=None,
router_id=None)
self.context.session.add(self.fip_int_port)
self.context.session.add(self.fip_int_ip_info)
self.context.session.add(self.fip)
self.context.session.flush()
self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
'tenant_id': self.tenant_id}
def _get_gwports_dict(self, gw_ports):
return dict((gw_port['id'], gw_port)
for gw_port in gw_ports)
def _reset_ext_gw(self):
# Reset external gateway
self.router.gw_port_id = None
self.context.session.add(self.router)
self.context.session.flush()
def _test_update_router_gw(self, current_enable_snat, gw_info=None,
expected_enable_snat=True):
if not current_enable_snat:
previous_gw_info = {'network_id': self.ext_net_id,
'enable_snat': current_enable_snat}
self.target_object._update_router_gw_info(
self.context, self.router.id, previous_gw_info)
self.target_object._update_router_gw_info(
self.context, self.router.id, gw_info)
router = self.target_object._get_router(
self.context, self.router.id)
try:
self.assertEqual(FAKE_GW_PORT_ID,
router.gw_port.id)
self.assertEqual(FAKE_GW_PORT_MAC,
router.gw_port.mac_address)
except AttributeError:
self.assertIsNone(router.gw_port)
self.assertEqual(expected_enable_snat, router.enable_snat)
def test_update_router_gw_with_gw_info_none(self):
self._test_update_router_gw(current_enable_snat=True)
def test_update_router_gw_without_info_and_snat_disabled_previously(self):
self._test_update_router_gw(current_enable_snat=False)
def test_update_router_gw_with_network_only(self):
info = {'network_id': self.ext_net_id}
self._test_update_router_gw(current_enable_snat=True, gw_info=info)
def test_update_router_gw_with_network_and_snat_disabled_previously(self):
info = {'network_id': self.ext_net_id}
self._test_update_router_gw(current_enable_snat=False, gw_info=info)
def test_update_router_gw_with_snat_disabled(self):
info = {'network_id': self.ext_net_id,
'enable_snat': False}
self._test_update_router_gw(
current_enable_snat=True, gw_info=info, expected_enable_snat=False)
def test_update_router_gw_with_snat_enabled(self):
info = {'network_id': self.ext_net_id,
'enable_snat': True}
self._test_update_router_gw(current_enable_snat=False, gw_info=info)
def test_make_router_dict_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
self.assertIsNone(router_dict[l3.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': True,
'external_fixed_ips': []},
router_dict[l3.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': False,
'external_fixed_ips': []},
router_dict[l3.EXTERNAL_GW_INFO])
def test_build_routers_list_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(self.context,
[router_dict],
[])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNone(router.get('gw_port'))
self.assertIsNone(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict],
self._get_gwports_dict([self.router.gw_port]))
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertTrue(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict],
self._get_gwports_dict([self.router.gw_port]))
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertFalse(router.get('enable_snat'))
def test_build_routers_list_with_gw_port_mismatch(self):
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict], {})
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNone(router.get('gw_port'))
self.assertIsNone(router.get('enable_snat'))
class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
test_l3.L3NatTestCaseMixin):
def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None):
# Store l3 resource attribute map as it will be updated
self._l3_attribute_map_bk = {}
for item in l3.RESOURCE_ATTRIBUTE_MAP:
self._l3_attribute_map_bk[item] = (
l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
plugin = plugin or (
'neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
'TestDbIntPlugin')
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
ext_mgr = ext_mgr or TestExtensionManager()
super(ExtGwModeIntTestCase, self).setUp(plugin=plugin,
ext_mgr=ext_mgr,
service_plugins=svc_plugins)
self.addCleanup(self.restore_l3_attribute_map)
def restore_l3_attribute_map(self):
l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk
def tearDown(self):
super(ExtGwModeIntTestCase, self).tearDown()
def _set_router_external_gateway(self, router_id, network_id,
snat_enabled=None,
expected_code=exc.HTTPOk.code,
neutron_context=None):
ext_gw_info = {'network_id': network_id}
# Need to set enable_snat also if snat_enabled == False
if snat_enabled is not None:
ext_gw_info['enable_snat'] = snat_enabled
return self._update('routers', router_id,
{'router': {'external_gateway_info':
ext_gw_info}},
expected_code=expected_code,
neutron_context=neutron_context)
def test_router_gateway_set_fail_after_port_create(self):
with self.router() as r, self.subnet() as s:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(plugin, '_get_port',
side_effect=ValueError()):
self._set_router_external_gateway(r['router']['id'],
ext_net_id,
expected_code=500)
ports = [p for p in plugin.get_ports(nctx.get_admin_context())
if p['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_GW]
self.assertFalse(ports)
def test_router_gateway_set_retry(self):
with self.router() as r, self.subnet() as s:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
with mock.patch.object(
l3_db.L3_NAT_dbonly_mixin, '_validate_gw_info',
side_effect=[db_exc.RetryRequest(None), ext_net_id]):
self._set_router_external_gateway(r['router']['id'],
ext_net_id)
res = self._show('routers', r['router']['id'])['router']
self.assertEqual(ext_net_id,
res['external_gateway_info']['network_id'])
def test_router_create_with_gwinfo_invalid_ext_ip(self):
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
ext_info = {
'network_id': s['subnet']['network_id'],
'external_fixed_ips': [{'ip_address': '10.0.0.'}]
}
error_code = exc.HTTPBadRequest.code
res = self._create_router(
self.fmt, _uuid(), arg_list=('external_gateway_info',),
external_gateway_info=ext_info,
expected_code=error_code
)
msg = ("Invalid input for external_gateway_info. "
"Reason: '10.0.0.' is not a valid IP address.")
body = jsonutils.loads(res.body)
self.assertEqual(msg, body['NeutronError']['message'])
def test_router_create_show_no_ext_gwinfo(self):
name = 'router1'
tenant_id = _uuid()
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info', None)]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
def _test_router_create_show_ext_gwinfo(self, snat_input_value,
snat_expected_value):
name = 'router1'
tenant_id = _uuid()
with self.subnet() as s:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
input_value = {'network_id': ext_net_id}
if snat_input_value in (True, False):
input_value['enable_snat'] = snat_input_value
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info',
{'network_id': ext_net_id,
'enable_snat': snat_expected_value,
'external_fixed_ips': [{
'ip_address': mock.ANY,
'subnet_id': s['subnet']['id']}]})]
with self.router(
name=name, admin_state_up=True, tenant_id=tenant_id,
external_gateway_info=input_value) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
def test_router_create_show_ext_gwinfo_default(self):
self._test_router_create_show_ext_gwinfo(None, True)
def test_router_create_show_ext_gwinfo_with_snat_enabled(self):
self._test_router_create_show_ext_gwinfo(True, True)
def test_router_create_show_ext_gwinfo_with_snat_disabled(self):
self._test_router_create_show_ext_gwinfo(False, False)
def _test_router_update_ext_gwinfo(self, snat_input_value,
snat_expected_value=False,
expected_http_code=exc.HTTPOk.code):
with self.router() as r:
with self.subnet() as s:
try:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
self._set_router_external_gateway(
r['router']['id'], ext_net_id,
snat_enabled=snat_input_value,
expected_code=expected_http_code)
if expected_http_code != exc.HTTPOk.code:
return
body = self._show('routers', r['router']['id'])
res_gw_info = body['router']['external_gateway_info']
self.assertEqual(ext_net_id, res_gw_info['network_id'])
self.assertEqual(snat_expected_value,
res_gw_info['enable_snat'])
finally:
self._remove_external_gateway_from_router(
r['router']['id'], ext_net_id)
def test_router_update_ext_gwinfo_default(self):
self._test_router_update_ext_gwinfo(None, True)
def test_router_update_ext_gwinfo_with_snat_enabled(self):
self._test_router_update_ext_gwinfo(True, True)
def test_router_update_ext_gwinfo_with_snat_disabled(self):
self._test_router_update_ext_gwinfo(False, False)
def test_router_update_ext_gwinfo_with_invalid_snat_setting(self):
self._test_router_update_ext_gwinfo(
'xxx', None, expected_http_code=exc.HTTPBadRequest.code)
class ExtGwModeSepTestCase(ExtGwModeIntTestCase):
def setUp(self, plugin=None):
# Store l3 resource attribute map as it will be updated
self._l3_attribute_map_bk = {}
for item in l3.RESOURCE_ATTRIBUTE_MAP:
self._l3_attribute_map_bk[item] = (
l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
plugin = plugin or (
'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin')
# the L3 service plugin
l3_plugin = ('neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
'TestDbSepPlugin')
svc_plugins = {'l3_plugin_name': l3_plugin}
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
super(ExtGwModeSepTestCase, self).setUp(plugin=plugin,
svc_plugins=svc_plugins)
self.addCleanup(self.restore_l3_attribute_map)
| [
"# Copyright 2013 VMware, Inc.\n",
"# All rights reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"#\n",
"\n",
"import mock\n",
"import netaddr\n",
"from neutron_lib import constants\n",
"from oslo_config import cfg\n",
"from oslo_db import exception as db_exc\n",
"from oslo_serialization import jsonutils\n",
"from oslo_utils import uuidutils\n",
"import testscenarios\n",
"from webob import exc\n",
"\n",
"from neutron.common import utils\n",
"from neutron import context as nctx\n",
"from neutron.db import api as db_api\n",
"from neutron.db import l3_db\n",
"from neutron.db import l3_gwmode_db\n",
"from neutron.db.models import external_net as ext_net_models\n",
"from neutron.db.models import l3 as l3_models\n",
"from neutron.db import models_v2\n",
"from neutron.extensions import l3\n",
"from neutron.extensions import l3_ext_gw_mode\n",
"from neutron import manager\n",
"from neutron.objects import network as net_obj\n",
"from neutron.objects import subnet as subnet_obj\n",
"from neutron.tests import base\n",
"from neutron.tests.unit.db import test_db_base_plugin_v2\n",
"from neutron.tests.unit.extensions import test_l3\n",
"from neutron.tests.unit import testlib_api\n",
"\n",
"_uuid = uuidutils.generate_uuid\n",
"FAKE_GW_PORT_ID = _uuid()\n",
"FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff'\n",
"FAKE_FIP_EXT_PORT_ID = _uuid()\n",
"FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66'\n",
"FAKE_FIP_INT_PORT_ID = _uuid()\n",
"FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa'\n",
"FAKE_ROUTER_PORT_ID = _uuid()\n",
"FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb'\n",
"\n",
"\n",
"class TestExtensionManager(object):\n",
"\n",
" def get_resources(self):\n",
" # Simulate extension of L3 attribute map\n",
" for key in l3.RESOURCE_ATTRIBUTE_MAP.keys():\n",
" l3.RESOURCE_ATTRIBUTE_MAP[key].update(\n",
" l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))\n",
" return l3.L3.get_resources()\n",
"\n",
" def get_actions(self):\n",
" return []\n",
"\n",
" def get_request_extensions(self):\n",
" return []\n",
"\n",
"\n",
"# A simple class for making a concrete class out of the mixin\n",
"# for the case of a plugin that integrates l3 routing.\n",
"class TestDbIntPlugin(test_l3.TestL3NatIntPlugin,\n",
" l3_gwmode_db.L3_NAT_db_mixin):\n",
"\n",
" supported_extension_aliases = [\"external-net\", \"router\", \"ext-gw-mode\"]\n",
"\n",
"\n",
"# A simple class for making a concrete class out of the mixin\n",
"# for the case of a l3 router service plugin\n",
"class TestDbSepPlugin(test_l3.TestL3NatServicePlugin,\n",
" l3_gwmode_db.L3_NAT_db_mixin):\n",
"\n",
" supported_extension_aliases = [\"router\", \"ext-gw-mode\"]\n",
"\n",
"\n",
"class TestGetEnableSnat(testscenarios.WithScenarios, base.BaseTestCase):\n",
" scenarios = [\n",
" ('enabled', {'enable_snat_by_default': True}),\n",
" ('disabled', {'enable_snat_by_default': False})]\n",
"\n",
" def setUp(self):\n",
" super(TestGetEnableSnat, self).setUp()\n",
" self.config(enable_snat_by_default=self.enable_snat_by_default)\n",
"\n",
" def _test_get_enable_snat(self, expected, info):\n",
" observed = l3_gwmode_db.L3_NAT_dbonly_mixin._get_enable_snat(info)\n",
" self.assertEqual(expected, observed)\n",
"\n",
" def test_get_enable_snat_without_gw_info(self):\n",
" self._test_get_enable_snat(self.enable_snat_by_default, {})\n",
"\n",
" def test_get_enable_snat_without_enable_snat(self):\n",
" info = {'network_id': _uuid()}\n",
" self._test_get_enable_snat(self.enable_snat_by_default, info)\n",
"\n",
" def test_get_enable_snat_with_snat_enabled(self):\n",
" self._test_get_enable_snat(True, {'enable_snat': True})\n",
"\n",
" def test_get_enable_snat_with_snat_disabled(self):\n",
" self._test_get_enable_snat(False, {'enable_snat': False})\n",
"\n",
"\n",
"class TestL3GwModeMixin(testlib_api.SqlTestCase):\n",
"\n",
" def setUp(self):\n",
" super(TestL3GwModeMixin, self).setUp()\n",
" plugin = __name__ + '.' + TestDbIntPlugin.__name__\n",
" self.setup_coreplugin(plugin)\n",
" self.target_object = TestDbIntPlugin()\n",
" # Patch the context\n",
" ctx_patcher = mock.patch('neutron.context', autospec=True)\n",
" mock_context = ctx_patcher.start()\n",
" self.context = mock_context.get_admin_context()\n",
" # This ensure also calls to elevated work in unit tests\n",
" self.context.elevated.return_value = self.context\n",
" self.context.session = db_api.get_session()\n",
" # Create sample data for tests\n",
" self.ext_net_id = _uuid()\n",
" self.int_net_id = _uuid()\n",
" self.int_sub_id = _uuid()\n",
" self.tenant_id = 'the_tenant'\n",
" self.network = net_obj.Network(\n",
" self.context,\n",
" id=self.ext_net_id,\n",
" project_id=self.tenant_id,\n",
" admin_state_up=True,\n",
" status=constants.NET_STATUS_ACTIVE)\n",
" self.net_ext = ext_net_models.ExternalNetwork(\n",
" network_id=self.ext_net_id)\n",
" self.network.create()\n",
" self.context.session.add(self.net_ext)\n",
" self.router = l3_models.Router(\n",
" id=_uuid(),\n",
" name=None,\n",
" tenant_id=self.tenant_id,\n",
" admin_state_up=True,\n",
" status=constants.NET_STATUS_ACTIVE,\n",
" enable_snat=True,\n",
" gw_port_id=None)\n",
" self.context.session.add(self.router)\n",
" self.context.session.flush()\n",
" self.router_gw_port = models_v2.Port(\n",
" id=FAKE_GW_PORT_ID,\n",
" tenant_id=self.tenant_id,\n",
" device_id=self.router.id,\n",
" device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,\n",
" admin_state_up=True,\n",
" status=constants.PORT_STATUS_ACTIVE,\n",
" mac_address=FAKE_GW_PORT_MAC,\n",
" network_id=self.ext_net_id)\n",
" self.router.gw_port_id = self.router_gw_port.id\n",
" self.context.session.add(self.router)\n",
" self.context.session.add(self.router_gw_port)\n",
" self.context.session.flush()\n",
" self.fip_ext_port = models_v2.Port(\n",
" id=FAKE_FIP_EXT_PORT_ID,\n",
" tenant_id=self.tenant_id,\n",
" admin_state_up=True,\n",
" device_id=self.router.id,\n",
" device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,\n",
" status=constants.PORT_STATUS_ACTIVE,\n",
" mac_address=FAKE_FIP_EXT_PORT_MAC,\n",
" network_id=self.ext_net_id)\n",
" self.context.session.add(self.fip_ext_port)\n",
" self.context.session.flush()\n",
" self.int_net = net_obj.Network(\n",
" self.context,\n",
" id=self.int_net_id,\n",
" project_id=self.tenant_id,\n",
" admin_state_up=True,\n",
" status=constants.NET_STATUS_ACTIVE)\n",
" self.int_sub = subnet_obj.Subnet(self.context,\n",
" id=self.int_sub_id,\n",
" project_id=self.tenant_id,\n",
" ip_version=4,\n",
" cidr=utils.AuthenticIPNetwork('3.3.3.0/24'),\n",
" gateway_ip=netaddr.IPAddress('3.3.3.1'),\n",
" network_id=self.int_net_id)\n",
" self.router_port = models_v2.Port(\n",
" id=FAKE_ROUTER_PORT_ID,\n",
" tenant_id=self.tenant_id,\n",
" admin_state_up=True,\n",
" device_id=self.router.id,\n",
" device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,\n",
" status=constants.PORT_STATUS_ACTIVE,\n",
" mac_address=FAKE_ROUTER_PORT_MAC,\n",
" network_id=self.int_net_id)\n",
" self.router_port_ip_info = models_v2.IPAllocation(\n",
" port_id=self.router_port.id,\n",
" network_id=self.int_net.id,\n",
" subnet_id=self.int_sub_id,\n",
" ip_address='3.3.3.1')\n",
" self.int_net.create()\n",
" self.int_sub.create()\n",
" self.context.session.add(self.router_port)\n",
" self.context.session.add(self.router_port_ip_info)\n",
" self.context.session.flush()\n",
" self.fip_int_port = models_v2.Port(\n",
" id=FAKE_FIP_INT_PORT_ID,\n",
" tenant_id=self.tenant_id,\n",
" admin_state_up=True,\n",
" device_id='something',\n",
" device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova',\n",
" status=constants.PORT_STATUS_ACTIVE,\n",
" mac_address=FAKE_FIP_INT_PORT_MAC,\n",
" network_id=self.int_net_id)\n",
" self.fip_int_ip_info = models_v2.IPAllocation(\n",
" port_id=self.fip_int_port.id,\n",
" network_id=self.int_net.id,\n",
" subnet_id=self.int_sub_id,\n",
" ip_address='3.3.3.3')\n",
" self.fip = l3_models.FloatingIP(\n",
" id=_uuid(),\n",
" floating_ip_address='1.1.1.2',\n",
" floating_network_id=self.ext_net_id,\n",
" floating_port_id=FAKE_FIP_EXT_PORT_ID,\n",
" fixed_port_id=None,\n",
" fixed_ip_address=None,\n",
" router_id=None)\n",
" self.context.session.add(self.fip_int_port)\n",
" self.context.session.add(self.fip_int_ip_info)\n",
" self.context.session.add(self.fip)\n",
" self.context.session.flush()\n",
" self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,\n",
" 'tenant_id': self.tenant_id}\n",
"\n",
" def _get_gwports_dict(self, gw_ports):\n",
" return dict((gw_port['id'], gw_port)\n",
" for gw_port in gw_ports)\n",
"\n",
" def _reset_ext_gw(self):\n",
" # Reset external gateway\n",
" self.router.gw_port_id = None\n",
" self.context.session.add(self.router)\n",
" self.context.session.flush()\n",
"\n",
" def _test_update_router_gw(self, current_enable_snat, gw_info=None,\n",
" expected_enable_snat=True):\n",
" if not current_enable_snat:\n",
" previous_gw_info = {'network_id': self.ext_net_id,\n",
" 'enable_snat': current_enable_snat}\n",
" self.target_object._update_router_gw_info(\n",
" self.context, self.router.id, previous_gw_info)\n",
"\n",
" self.target_object._update_router_gw_info(\n",
" self.context, self.router.id, gw_info)\n",
" router = self.target_object._get_router(\n",
" self.context, self.router.id)\n",
" try:\n",
" self.assertEqual(FAKE_GW_PORT_ID,\n",
" router.gw_port.id)\n",
" self.assertEqual(FAKE_GW_PORT_MAC,\n",
" router.gw_port.mac_address)\n",
" except AttributeError:\n",
" self.assertIsNone(router.gw_port)\n",
" self.assertEqual(expected_enable_snat, router.enable_snat)\n",
"\n",
" def test_update_router_gw_with_gw_info_none(self):\n",
" self._test_update_router_gw(current_enable_snat=True)\n",
"\n",
" def test_update_router_gw_without_info_and_snat_disabled_previously(self):\n",
" self._test_update_router_gw(current_enable_snat=False)\n",
"\n",
" def test_update_router_gw_with_network_only(self):\n",
" info = {'network_id': self.ext_net_id}\n",
" self._test_update_router_gw(current_enable_snat=True, gw_info=info)\n",
"\n",
" def test_update_router_gw_with_network_and_snat_disabled_previously(self):\n",
" info = {'network_id': self.ext_net_id}\n",
" self._test_update_router_gw(current_enable_snat=False, gw_info=info)\n",
"\n",
" def test_update_router_gw_with_snat_disabled(self):\n",
" info = {'network_id': self.ext_net_id,\n",
" 'enable_snat': False}\n",
" self._test_update_router_gw(\n",
" current_enable_snat=True, gw_info=info, expected_enable_snat=False)\n",
"\n",
" def test_update_router_gw_with_snat_enabled(self):\n",
" info = {'network_id': self.ext_net_id,\n",
" 'enable_snat': True}\n",
" self._test_update_router_gw(current_enable_snat=False, gw_info=info)\n",
"\n",
" def test_make_router_dict_no_ext_gw(self):\n",
" self._reset_ext_gw()\n",
" router_dict = self.target_object._make_router_dict(self.router)\n",
" self.assertIsNone(router_dict[l3.EXTERNAL_GW_INFO])\n",
"\n",
" def test_make_router_dict_with_ext_gw(self):\n",
" router_dict = self.target_object._make_router_dict(self.router)\n",
" self.assertEqual({'network_id': self.ext_net_id,\n",
" 'enable_snat': True,\n",
" 'external_fixed_ips': []},\n",
" router_dict[l3.EXTERNAL_GW_INFO])\n",
"\n",
" def test_make_router_dict_with_ext_gw_snat_disabled(self):\n",
" self.router.enable_snat = False\n",
" router_dict = self.target_object._make_router_dict(self.router)\n",
" self.assertEqual({'network_id': self.ext_net_id,\n",
" 'enable_snat': False,\n",
" 'external_fixed_ips': []},\n",
" router_dict[l3.EXTERNAL_GW_INFO])\n",
"\n",
" def test_build_routers_list_no_ext_gw(self):\n",
" self._reset_ext_gw()\n",
" router_dict = self.target_object._make_router_dict(self.router)\n",
" routers = self.target_object._build_routers_list(self.context,\n",
" [router_dict],\n",
" [])\n",
" self.assertEqual(1, len(routers))\n",
" router = routers[0]\n",
" self.assertIsNone(router.get('gw_port'))\n",
" self.assertIsNone(router.get('enable_snat'))\n",
"\n",
" def test_build_routers_list_with_ext_gw(self):\n",
" router_dict = self.target_object._make_router_dict(self.router)\n",
" routers = self.target_object._build_routers_list(\n",
" self.context, [router_dict],\n",
" self._get_gwports_dict([self.router.gw_port]))\n",
" self.assertEqual(1, len(routers))\n",
" router = routers[0]\n",
" self.assertIsNotNone(router.get('gw_port'))\n",
" self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])\n",
" self.assertTrue(router.get('enable_snat'))\n",
"\n",
" def test_build_routers_list_with_ext_gw_snat_disabled(self):\n",
" self.router.enable_snat = False\n",
" router_dict = self.target_object._make_router_dict(self.router)\n",
" routers = self.target_object._build_routers_list(\n",
" self.context, [router_dict],\n",
" self._get_gwports_dict([self.router.gw_port]))\n",
" self.assertEqual(1, len(routers))\n",
" router = routers[0]\n",
" self.assertIsNotNone(router.get('gw_port'))\n",
" self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])\n",
" self.assertFalse(router.get('enable_snat'))\n",
"\n",
" def test_build_routers_list_with_gw_port_mismatch(self):\n",
" router_dict = self.target_object._make_router_dict(self.router)\n",
" routers = self.target_object._build_routers_list(\n",
" self.context, [router_dict], {})\n",
" self.assertEqual(1, len(routers))\n",
" router = routers[0]\n",
" self.assertIsNone(router.get('gw_port'))\n",
" self.assertIsNone(router.get('enable_snat'))\n",
"\n",
"\n",
"class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,\n",
" test_l3.L3NatTestCaseMixin):\n",
"\n",
" def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None):\n",
" # Store l3 resource attribute map as it will be updated\n",
" self._l3_attribute_map_bk = {}\n",
" for item in l3.RESOURCE_ATTRIBUTE_MAP:\n",
" self._l3_attribute_map_bk[item] = (\n",
" l3.RESOURCE_ATTRIBUTE_MAP[item].copy())\n",
" plugin = plugin or (\n",
" 'neutron.tests.unit.extensions.test_l3_ext_gw_mode.'\n",
" 'TestDbIntPlugin')\n",
" # for these tests we need to enable overlapping ips\n",
" cfg.CONF.set_default('allow_overlapping_ips', True)\n",
" ext_mgr = ext_mgr or TestExtensionManager()\n",
" super(ExtGwModeIntTestCase, self).setUp(plugin=plugin,\n",
" ext_mgr=ext_mgr,\n",
" service_plugins=svc_plugins)\n",
" self.addCleanup(self.restore_l3_attribute_map)\n",
"\n",
" def restore_l3_attribute_map(self):\n",
" l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk\n",
"\n",
" def tearDown(self):\n",
" super(ExtGwModeIntTestCase, self).tearDown()\n",
"\n",
" def _set_router_external_gateway(self, router_id, network_id,\n",
" snat_enabled=None,\n",
" expected_code=exc.HTTPOk.code,\n",
" neutron_context=None):\n",
" ext_gw_info = {'network_id': network_id}\n",
" # Need to set enable_snat also if snat_enabled == False\n",
" if snat_enabled is not None:\n",
" ext_gw_info['enable_snat'] = snat_enabled\n",
" return self._update('routers', router_id,\n",
" {'router': {'external_gateway_info':\n",
" ext_gw_info}},\n",
" expected_code=expected_code,\n",
" neutron_context=neutron_context)\n",
"\n",
" def test_router_gateway_set_fail_after_port_create(self):\n",
" with self.router() as r, self.subnet() as s:\n",
" ext_net_id = s['subnet']['network_id']\n",
" self._set_net_external(ext_net_id)\n",
" plugin = manager.NeutronManager.get_plugin()\n",
" with mock.patch.object(plugin, '_get_port',\n",
" side_effect=ValueError()):\n",
" self._set_router_external_gateway(r['router']['id'],\n",
" ext_net_id,\n",
" expected_code=500)\n",
" ports = [p for p in plugin.get_ports(nctx.get_admin_context())\n",
" if p['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_GW]\n",
" self.assertFalse(ports)\n",
"\n",
" def test_router_gateway_set_retry(self):\n",
" with self.router() as r, self.subnet() as s:\n",
" ext_net_id = s['subnet']['network_id']\n",
" self._set_net_external(ext_net_id)\n",
" with mock.patch.object(\n",
" l3_db.L3_NAT_dbonly_mixin, '_validate_gw_info',\n",
" side_effect=[db_exc.RetryRequest(None), ext_net_id]):\n",
" self._set_router_external_gateway(r['router']['id'],\n",
" ext_net_id)\n",
" res = self._show('routers', r['router']['id'])['router']\n",
" self.assertEqual(ext_net_id,\n",
" res['external_gateway_info']['network_id'])\n",
"\n",
" def test_router_create_with_gwinfo_invalid_ext_ip(self):\n",
" with self.subnet() as s:\n",
" self._set_net_external(s['subnet']['network_id'])\n",
" ext_info = {\n",
" 'network_id': s['subnet']['network_id'],\n",
" 'external_fixed_ips': [{'ip_address': '10.0.0.'}]\n",
" }\n",
" error_code = exc.HTTPBadRequest.code\n",
" res = self._create_router(\n",
" self.fmt, _uuid(), arg_list=('external_gateway_info',),\n",
" external_gateway_info=ext_info,\n",
" expected_code=error_code\n",
" )\n",
" msg = (\"Invalid input for external_gateway_info. \"\n",
" \"Reason: '10.0.0.' is not a valid IP address.\")\n",
" body = jsonutils.loads(res.body)\n",
" self.assertEqual(msg, body['NeutronError']['message'])\n",
"\n",
" def test_router_create_show_no_ext_gwinfo(self):\n",
" name = 'router1'\n",
" tenant_id = _uuid()\n",
" expected_value = [('name', name), ('tenant_id', tenant_id),\n",
" ('admin_state_up', True), ('status', 'ACTIVE'),\n",
" ('external_gateway_info', None)]\n",
" with self.router(name=name, admin_state_up=True,\n",
" tenant_id=tenant_id) as router:\n",
" res = self._show('routers', router['router']['id'])\n",
" for k, v in expected_value:\n",
" self.assertEqual(res['router'][k], v)\n",
"\n",
" def _test_router_create_show_ext_gwinfo(self, snat_input_value,\n",
" snat_expected_value):\n",
" name = 'router1'\n",
" tenant_id = _uuid()\n",
" with self.subnet() as s:\n",
" ext_net_id = s['subnet']['network_id']\n",
" self._set_net_external(ext_net_id)\n",
" input_value = {'network_id': ext_net_id}\n",
" if snat_input_value in (True, False):\n",
" input_value['enable_snat'] = snat_input_value\n",
" expected_value = [('name', name), ('tenant_id', tenant_id),\n",
" ('admin_state_up', True), ('status', 'ACTIVE'),\n",
" ('external_gateway_info',\n",
" {'network_id': ext_net_id,\n",
" 'enable_snat': snat_expected_value,\n",
" 'external_fixed_ips': [{\n",
" 'ip_address': mock.ANY,\n",
" 'subnet_id': s['subnet']['id']}]})]\n",
" with self.router(\n",
" name=name, admin_state_up=True, tenant_id=tenant_id,\n",
" external_gateway_info=input_value) as router:\n",
" res = self._show('routers', router['router']['id'])\n",
" for k, v in expected_value:\n",
" self.assertEqual(res['router'][k], v)\n",
"\n",
" def test_router_create_show_ext_gwinfo_default(self):\n",
" self._test_router_create_show_ext_gwinfo(None, True)\n",
"\n",
" def test_router_create_show_ext_gwinfo_with_snat_enabled(self):\n",
" self._test_router_create_show_ext_gwinfo(True, True)\n",
"\n",
" def test_router_create_show_ext_gwinfo_with_snat_disabled(self):\n",
" self._test_router_create_show_ext_gwinfo(False, False)\n",
"\n",
" def _test_router_update_ext_gwinfo(self, snat_input_value,\n",
" snat_expected_value=False,\n",
" expected_http_code=exc.HTTPOk.code):\n",
" with self.router() as r:\n",
" with self.subnet() as s:\n",
" try:\n",
" ext_net_id = s['subnet']['network_id']\n",
" self._set_net_external(ext_net_id)\n",
" self._set_router_external_gateway(\n",
" r['router']['id'], ext_net_id,\n",
" snat_enabled=snat_input_value,\n",
" expected_code=expected_http_code)\n",
" if expected_http_code != exc.HTTPOk.code:\n",
" return\n",
" body = self._show('routers', r['router']['id'])\n",
" res_gw_info = body['router']['external_gateway_info']\n",
" self.assertEqual(ext_net_id, res_gw_info['network_id'])\n",
" self.assertEqual(snat_expected_value,\n",
" res_gw_info['enable_snat'])\n",
" finally:\n",
" self._remove_external_gateway_from_router(\n",
" r['router']['id'], ext_net_id)\n",
"\n",
" def test_router_update_ext_gwinfo_default(self):\n",
" self._test_router_update_ext_gwinfo(None, True)\n",
"\n",
" def test_router_update_ext_gwinfo_with_snat_enabled(self):\n",
" self._test_router_update_ext_gwinfo(True, True)\n",
"\n",
" def test_router_update_ext_gwinfo_with_snat_disabled(self):\n",
" self._test_router_update_ext_gwinfo(False, False)\n",
"\n",
" def test_router_update_ext_gwinfo_with_invalid_snat_setting(self):\n",
" self._test_router_update_ext_gwinfo(\n",
" 'xxx', None, expected_http_code=exc.HTTPBadRequest.code)\n",
"\n",
"\n",
"class ExtGwModeSepTestCase(ExtGwModeIntTestCase):\n",
"\n",
" def setUp(self, plugin=None):\n",
" # Store l3 resource attribute map as it will be updated\n",
" self._l3_attribute_map_bk = {}\n",
" for item in l3.RESOURCE_ATTRIBUTE_MAP:\n",
" self._l3_attribute_map_bk[item] = (\n",
" l3.RESOURCE_ATTRIBUTE_MAP[item].copy())\n",
" plugin = plugin or (\n",
" 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin')\n",
" # the L3 service plugin\n",
" l3_plugin = ('neutron.tests.unit.extensions.test_l3_ext_gw_mode.'\n",
" 'TestDbSepPlugin')\n",
" svc_plugins = {'l3_plugin_name': l3_plugin}\n",
" # for these tests we need to enable overlapping ips\n",
" cfg.CONF.set_default('allow_overlapping_ips', True)\n",
" super(ExtGwModeSepTestCase, self).setUp(plugin=plugin,\n",
" svc_plugins=svc_plugins)\n",
" self.addCleanup(self.restore_l3_attribute_map)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125,
0.02564102564102564,
0.038461538461538464,
0.017543859649122806,
0.018867924528301886,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014285714285714285,
0.0136986301369863,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 545 | 0.000369 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:12064")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:12064")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Spacecoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Spacecoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| [
"from jsonrpc import ServiceProxy\n",
"import sys\n",
"import string\n",
"\n",
"# ===== BEGIN USER SETTINGS =====\n",
"# if you do not set these you will be prompted for a password for every command\n",
"rpcuser = \"\"\n",
"rpcpass = \"\"\n",
"# ====== END USER SETTINGS ======\n",
"\n",
"\n",
"if rpcpass == \"\":\n",
"\taccess = ServiceProxy(\"http://127.0.0.1:12064\")\n",
"else:\n",
"\taccess = ServiceProxy(\"http://\"+rpcuser+\":\"+rpcpass+\"@127.0.0.1:12064\")\n",
"cmd = sys.argv[1].lower()\n",
"\n",
"if cmd == \"backupwallet\":\n",
"\ttry:\n",
"\t\tpath = raw_input(\"Enter destination path/filename: \")\n",
"\t\tprint access.backupwallet(path)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getaccount\":\n",
"\ttry:\n",
"\t\taddr = raw_input(\"Enter a Spacecoin address: \")\n",
"\t\tprint access.getaccount(addr)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getaccountaddress\":\n",
"\ttry:\n",
"\t\tacct = raw_input(\"Enter an account name: \")\n",
"\t\tprint access.getaccountaddress(acct)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getaddressesbyaccount\":\n",
"\ttry:\n",
"\t\tacct = raw_input(\"Enter an account name: \")\n",
"\t\tprint access.getaddressesbyaccount(acct)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getbalance\":\n",
"\ttry:\n",
"\t\tacct = raw_input(\"Enter an account (optional): \")\n",
"\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.getbalance(acct, mc)\n",
"\t\texcept:\n",
"\t\t\tprint access.getbalance()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getblockbycount\":\n",
"\ttry:\n",
"\t\theight = raw_input(\"Height: \")\n",
"\t\tprint access.getblockbycount(height)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getblockcount\":\n",
"\ttry:\n",
"\t\tprint access.getblockcount()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getblocknumber\":\n",
"\ttry:\n",
"\t\tprint access.getblocknumber()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getconnectioncount\":\n",
"\ttry:\n",
"\t\tprint access.getconnectioncount()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getdifficulty\":\n",
"\ttry:\n",
"\t\tprint access.getdifficulty()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getgenerate\":\n",
"\ttry:\n",
"\t\tprint access.getgenerate()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"gethashespersec\":\n",
"\ttry:\n",
"\t\tprint access.gethashespersec()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getinfo\":\n",
"\ttry:\n",
"\t\tprint access.getinfo()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getnewaddress\":\n",
"\ttry:\n",
"\t\tacct = raw_input(\"Enter an account name: \")\n",
"\t\ttry:\n",
"\t\t\tprint access.getnewaddress(acct)\n",
"\t\texcept:\n",
"\t\t\tprint access.getnewaddress()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getreceivedbyaccount\":\n",
"\ttry:\n",
"\t\tacct = raw_input(\"Enter an account (optional): \")\n",
"\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.getreceivedbyaccount(acct, mc)\n",
"\t\texcept:\n",
"\t\t\tprint access.getreceivedbyaccount()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getreceivedbyaddress\":\n",
"\ttry:\n",
"\t\taddr = raw_input(\"Enter a Spacecoin address (optional): \")\n",
"\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.getreceivedbyaddress(addr, mc)\n",
"\t\texcept:\n",
"\t\t\tprint access.getreceivedbyaddress()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"gettransaction\":\n",
"\ttry:\n",
"\t\ttxid = raw_input(\"Enter a transaction ID: \")\n",
"\t\tprint access.gettransaction(txid)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"getwork\":\n",
"\ttry:\n",
"\t\tdata = raw_input(\"Data (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.gettransaction(data)\n",
"\t\texcept:\n",
"\t\t\tprint access.gettransaction()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"help\":\n",
"\ttry:\n",
"\t\tcmd = raw_input(\"Command (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.help(cmd)\n",
"\t\texcept:\n",
"\t\t\tprint access.help()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"listaccounts\":\n",
"\ttry:\n",
"\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.listaccounts(mc)\n",
"\t\texcept:\n",
"\t\t\tprint access.listaccounts()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"listreceivedbyaccount\":\n",
"\ttry:\n",
"\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n",
"\t\tincemp = raw_input(\"Include empty? (true/false, optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.listreceivedbyaccount(mc, incemp)\n",
"\t\texcept:\n",
"\t\t\tprint access.listreceivedbyaccount()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"listreceivedbyaddress\":\n",
"\ttry:\n",
"\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n",
"\t\tincemp = raw_input(\"Include empty? (true/false, optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.listreceivedbyaddress(mc, incemp)\n",
"\t\texcept:\n",
"\t\t\tprint access.listreceivedbyaddress()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"listtransactions\":\n",
"\ttry:\n",
"\t\tacct = raw_input(\"Account (optional): \")\n",
"\t\tcount = raw_input(\"Number of transactions (optional): \")\n",
"\t\tfrm = raw_input(\"Skip (optional):\")\n",
"\t\ttry:\n",
"\t\t\tprint access.listtransactions(acct, count, frm)\n",
"\t\texcept:\n",
"\t\t\tprint access.listtransactions()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"move\":\n",
"\ttry:\n",
"\t\tfrm = raw_input(\"From: \")\n",
"\t\tto = raw_input(\"To: \")\n",
"\t\tamt = raw_input(\"Amount:\")\n",
"\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n",
"\t\tcomment = raw_input(\"Comment (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.move(frm, to, amt, mc, comment)\n",
"\t\texcept:\n",
"\t\t\tprint access.move(frm, to, amt)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"sendfrom\":\n",
"\ttry:\n",
"\t\tfrm = raw_input(\"From: \")\n",
"\t\tto = raw_input(\"To: \")\n",
"\t\tamt = raw_input(\"Amount:\")\n",
"\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n",
"\t\tcomment = raw_input(\"Comment (optional): \")\n",
"\t\tcommentto = raw_input(\"Comment-to (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.sendfrom(frm, to, amt, mc, comment, commentto)\n",
"\t\texcept:\n",
"\t\t\tprint access.sendfrom(frm, to, amt)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"sendmany\":\n",
"\ttry:\n",
"\t\tfrm = raw_input(\"From: \")\n",
"\t\tto = raw_input(\"To (in format address1:amount1,address2:amount2,...): \")\n",
"\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n",
"\t\tcomment = raw_input(\"Comment (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.sendmany(frm,to,mc,comment)\n",
"\t\texcept:\n",
"\t\t\tprint access.sendmany(frm,to)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"sendtoaddress\":\n",
"\ttry:\n",
"\t\tto = raw_input(\"To (in format address1:amount1,address2:amount2,...): \")\n",
"\t\tamt = raw_input(\"Amount:\")\n",
"\t\tcomment = raw_input(\"Comment (optional): \")\n",
"\t\tcommentto = raw_input(\"Comment-to (optional): \")\n",
"\t\ttry:\n",
"\t\t\tprint access.sendtoaddress(to,amt,comment,commentto)\n",
"\t\texcept:\n",
"\t\t\tprint access.sendtoaddress(to,amt)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"setaccount\":\n",
"\ttry:\n",
"\t\taddr = raw_input(\"Address: \")\n",
"\t\tacct = raw_input(\"Account:\")\n",
"\t\tprint access.setaccount(addr,acct)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"setgenerate\":\n",
"\ttry:\n",
"\t\tgen= raw_input(\"Generate? (true/false): \")\n",
"\t\tcpus = raw_input(\"Max processors/cores (-1 for unlimited, optional):\")\n",
"\t\ttry:\n",
"\t\t\tprint access.setgenerate(gen, cpus)\n",
"\t\texcept:\n",
"\t\t\tprint access.setgenerate(gen)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"settxfee\":\n",
"\ttry:\n",
"\t\tamt = raw_input(\"Amount:\")\n",
"\t\tprint access.settxfee(amt)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"stop\":\n",
"\ttry:\n",
"\t\tprint access.stop()\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"validateaddress\":\n",
"\ttry:\n",
"\t\taddr = raw_input(\"Address: \")\n",
"\t\tprint access.validateaddress(addr)\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"walletpassphrase\":\n",
"\ttry:\n",
"\t\tpwd = raw_input(\"Enter wallet passphrase: \")\n",
"\t\taccess.walletpassphrase(pwd, 60)\n",
"\t\tprint \"\\n---Wallet unlocked---\\n\"\n",
"\texcept:\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\n",
"elif cmd == \"walletpassphrasechange\":\n",
"\ttry:\n",
"\t\tpwd = raw_input(\"Enter old wallet passphrase: \")\n",
"\t\tpwd2 = raw_input(\"Enter new wallet passphrase: \")\n",
"\t\taccess.walletpassphrasechange(pwd, pwd2)\n",
"\t\tprint\n",
"\t\tprint \"\\n---Passphrase changed---\\n\"\n",
"\texcept:\n",
"\t\tprint\n",
"\t\tprint \"\\n---An error occurred---\\n\"\n",
"\t\tprint\n",
"\n",
"else:\n",
"\tprint \"Command not found or not supported\"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02040816326530612,
0,
0.0136986301369863,
0,
0,
0,
0.16666666666666666,
0.017857142857142856,
0.029411764705882353,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.02,
0.03125,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.021739130434782608,
0.02564102564102564,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.021739130434782608,
0.023255813953488372,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.019230769230769232,
0.01818181818181818,
0.14285714285714285,
0.02702702702702703,
0.2,
0.034482758620689655,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.030303030303030304,
0.02564102564102564,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.03225806451612903,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.03125,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.027777777777777776,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.03225806451612903,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.034482758620689655,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.030303030303030304,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.04,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.021739130434782608,
0.14285714285714285,
0.027777777777777776,
0.2,
0.03125,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.019230769230769232,
0.01818181818181818,
0.14285714285714285,
0.02127659574468085,
0.2,
0.02564102564102564,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.01639344262295082,
0.01818181818181818,
0.14285714285714285,
0.02127659574468085,
0.2,
0.02564102564102564,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.02127659574468085,
0.027777777777777776,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.025,
0.14285714285714285,
0.02702702702702703,
0.2,
0.030303030303030304,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.023809523809523808,
0.14285714285714285,
0.038461538461538464,
0.2,
0.043478260869565216,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.01818181818181818,
0.14285714285714285,
0.030303030303030304,
0.2,
0.03225806451612903,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.01818181818181818,
0.015625,
0.14285714285714285,
0.02,
0.2,
0.025,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.01818181818181818,
0.015625,
0.14285714285714285,
0.02,
0.2,
0.025,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.023255813953488372,
0.01694915254237288,
0.02631578947368421,
0.14285714285714285,
0.0196078431372549,
0.2,
0.02857142857142857,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.03571428571428571,
0.04,
0.034482758620689655,
0.01818181818181818,
0.021739130434782608,
0.14285714285714285,
0.020833333333333332,
0.2,
0.02857142857142857,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.03571428571428571,
0.04,
0.034482758620689655,
0.01818181818181818,
0.021739130434782608,
0.0196078431372549,
0.14285714285714285,
0.015873015873015872,
0.2,
0.02564102564102564,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.03571428571428571,
0.013333333333333334,
0.01818181818181818,
0.021739130434782608,
0.14285714285714285,
0.09090909090909091,
0.2,
0.06060606060606061,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.013333333333333334,
0.034482758620689655,
0.021739130434782608,
0.0196078431372549,
0.14285714285714285,
0.07142857142857142,
0.2,
0.05263157894736842,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.03125,
0.03225806451612903,
0.05405405405405406,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.044444444444444446,
0.0136986301369863,
0.14285714285714285,
0.02564102564102564,
0.2,
0.030303030303030304,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.034482758620689655,
0.034482758620689655,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.045454545454545456,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.03125,
0.02702702702702703,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.02127659574468085,
0.02857142857142857,
0.027777777777777776,
0.2222222222222222,
0.02631578947368421,
0,
0,
0.16666666666666666,
0.0196078431372549,
0.019230769230769232,
0.023255813953488372,
0.125,
0.02564102564102564,
0.2222222222222222,
0.125,
0.02631578947368421,
0.125,
0,
0,
0.022727272727272728
] | 324 | 0.069558 |
"""
Netkit common functions
"""
__author__ = """\n""".join(['Simon Knight ([email protected])',
'Hung Nguyen ([email protected])'])
# Copyright (C) 2009-2010 by
# Simon Knight <[email protected]>
# Hung Nguyen <[email protected]>
# All rights reserved.
# BSD license.
#
# SSH connection code based on
# linux.byexamples.com/archives/346/python-how-to-access-ssh-with-pexpect/
import config
import logging
LOG = logging.getLogger("ANK")
try:
import pexpect
import pxssh
except ImportError:
LOG.error("Netkit deployment requires pexpect")
import os
import sys
from netaddr import IPNetwork
#based on http://bytes.com/topic/python/answers/619040-using-pxssh
#NOTE: only tested with assumption that SSH keys have been setup
#NOTE assumes taplist.csv exists in the LAB directory
# Prompt Netkit uses, used for expect
NETKIT_PROMPT = "~#"
#.............................................................................
class Netkit:
"""Common functions for interacting with a Netkit server."""
def __init__(self, host=None, username=None, shell_type="bash",
tapsn=IPNetwork("172.16.0.0/16")):
self.host = host
self.username = username
self.shell = None
self.shell_type = shell_type
# Assume that the admin
self.tap_host = tapsn[1]
self.tap_ip = tapsn[2]
self.NETKIT_PROMPT = NETKIT_PROMPT
#TODO configure these
self.tap_hostname = "taptunnelvm"
self.local_server = True
if self.host and self.username:
# Host and Username set, so ssh will be used
#TODO: make sure these are confirmed by the connect_server function
self.local_server = False
#TODO: state machine maintained by the connecting functions
# Disconnected | Netkit | TapHost
# use normal logger for logging? can we do this with pxssh??
self.logfile = open( os.path.join(config.log_dir, "pxssh.log"), 'w')
def get_shell(self):
""" Returns a shell connection to the Netkit server.
Connects server if no current connection.
Handles both remote (via SSH) and local server connections."""
if self.shell:
# Already connected
return self.shell
else:
# Need to connect first
self.connect_to_server()
return self.shell
def transfer_file(self, local_file):
"""Transfers file to remote host using SCP"""
# Sanity check
if self.local_server:
LOG.warn("Can only SCP to remote Netkit server")
return
child = pexpect.spawn("scp {0} {1}@{2}:.".format(local_file,
self.username, self.host))
child.logfile = self.logfile
child.expect(pexpect.EOF)
LOG.debug( "SCP result %s"% child.before.strip())
return
def connect_to_server(self):
"""Connects to Netkit server (if remote)"""
#TODO: make internal (private) function
#TODO: check state is disconnected
# Connects to the Linux machine running the Netkit lab
shell = None
if self.host and self.username:
# Connect to remote machine
ssh_link = self.shell
if ssh_link != None:
# ssh_link already set
return ssh_link
shell = pxssh.pxssh()
shell.logfile = self.logfile
LOG.info( "Connecting to {0}".format(self.host) )
shell.login(self.host, self.username)
# with pass: shell.login(self.host, self.username, self.password)
LOG.info( "Connected to " + self.host )
#TODO: set state to Netkit
else:
shell = pexpect.spawn (self.shell_type)
shell.sendline("uname")
shell.logfile = self.logfile
shell.setecho(True)
# Check Linux machine (Netkit won't run on other Unixes)
i = shell.expect(["Linux", "Darwin", pexpect.EOF, NETKIT_PROMPT])
if i == 0:
# Machine is running Linux. Send test command (ignore result)
shell.sendline("ls")
elif i == 1:
LOG.warn("Specified Netkit host is running Mac OS X, "
"please specify a Linux Netkit host.")
return None
else:
LOG.warn("Provided Netkit host is not running Linux")
self.shell = shell
return
def check_nk_installed(self):
"""Checks that Netkit is installed for given user"""
LOG.debug("Checking Netkit installed")
#Check length of netkit env var is nonzero
shell = self.shell
chk_cmd = 'hash lstart 2>&- && echo "Present" || echo >&2 "Absent"\n'
shell.sendline(chk_cmd)
i = shell.expect (["Present", "Absent"])
if i == 0:
LOG.debug("Netkit env var present")
# Netkit env var present, assume nk installed
return True
else:
LOG.debug("Netkit env not var present")
return False
def check_tunnel(self):
"""Checks TAP tunnel is active"""
LOG.debug("Checking tunnel")
tap_hostname = self.tap_hostname
shell = self.shell
taphost_started = False
#TODO: check can ping tap dest also
shell.sendline("vlist\n")
# Limit max lines to 1000
for dummy in range (0, 1000):
i = shell.expect ([
"\w+\s+(" + tap_hostname + ")\s+\d+\s+\d+", # Match host
pexpect.EOF,
"Total virtual machines", # Last line of vlist output
"vlist: not found",
])
if i == 0:
taphost_started = True
break
if i == 3:
LOG.warn("Unable to find vlist command")
else:
# Reached end
# TODO: look at using this instead of infinite loop
# Throw exception if reached here
break
# See if Tap host running
if taphost_started:
LOG.debug("Tap host machine active, tunnel should be up")
#todo: ping tap host machine ie tap_host ip to check is active
return True
else:
LOG.info("Starting tap tunnel: please enter sudo password and type '^]' (Control and right square bracket)"
"to return to AutoNetkit")
shell.sendline("vstart %s --con0=none --eth0=tap,%s,%s" % ( self.tap_hostname, self.tap_host, self.tap_ip))
shell.expect("Running ==>")
LOG.info(shell.after)
sys.stdout.flush()
shell.interact()
# Sendline in case user didn't have to sudo, and so didn't do anything
shell.sendline()
return True
def disconnect_vm(self, shell):
""" Disconnects from a Netkit virtual machine"""
shell.sendline("exit")
shell.expect("Connection to \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} closed.")
return
def connect_vm(self, host, shell, username="root", password="1234"):
""" Connects to a Netkit virtual machine"""
#TODO: modify this to use own shell, to allow multithreading
shell = self.get_shell()
#TODO: maintain state - eg if connected to netkit server or to
# a tap host
#Used within a connection already established
#TODO fix handling if ssh to remote machine first (eg from mac),
# or already on host machine (eg on linux running netkit)
#Connects using ssh, handling the different cases
#based on
#linux.byexamples.com/archives/346/python-how-to-access-ssh-with-pexpect
LOG.debug( "Connecting to tap host {0}".format(host) )
ssh_newkey = 'Are you sure you want to continue connecting'
# my ssh command line
shell.sendline('ssh {0}@{1}'.format(username, host) )
i = shell.expect([ssh_newkey, 'password:', pexpect.EOF, NETKIT_PROMPT])
if i == 0:
LOG.debug("Accepting new SSH key")
shell.sendline('yes')
i = shell.expect([ssh_newkey, 'password:', pexpect.EOF])
if i == 1:
LOG.debug( "Giving password")
shell.sendline(password)
shell.expect(NETKIT_PROMPT)
elif i == 2:
LOG.debug( "Either got key or connection timed out")
elif i == 3:
LOG.debug( "Connected using authentication key")
LOG.debug("Connected to {0}".format(host) )
return
| [
"\"\"\"\n",
"Netkit common functions \n",
"\"\"\"\n",
"__author__ = \"\"\"\\n\"\"\".join(['Simon Knight ([email protected])',\n",
" 'Hung Nguyen ([email protected])'])\n",
"# Copyright (C) 2009-2010 by \n",
"# Simon Knight <[email protected]>\n",
"# Hung Nguyen <[email protected]>\n",
"# All rights reserved.\n",
"# BSD license.\n",
"#\n",
"\n",
"# SSH connection code based on\n",
"# linux.byexamples.com/archives/346/python-how-to-access-ssh-with-pexpect/\n",
"import config\n",
"import logging\n",
"LOG = logging.getLogger(\"ANK\")\n",
"\n",
"try:\n",
" import pexpect\n",
" import pxssh\n",
"except ImportError:\n",
" LOG.error(\"Netkit deployment requires pexpect\")\n",
"\n",
"import os \n",
"import sys\n",
"\n",
"\n",
"from netaddr import IPNetwork\n",
"\n",
"\n",
"\n",
"#based on http://bytes.com/topic/python/answers/619040-using-pxssh\n",
"\n",
"\n",
"#NOTE: only tested with assumption that SSH keys have been setup \n",
"\n",
"#NOTE assumes taplist.csv exists in the LAB directory\n",
"\n",
"# Prompt Netkit uses, used for expect\n",
"NETKIT_PROMPT = \"~#\" \n",
" \n",
"#.............................................................................\n",
"class Netkit: \n",
" \"\"\"Common functions for interacting with a Netkit server.\"\"\"\n",
" \n",
"\n",
" def __init__(self, host=None, username=None, shell_type=\"bash\", \n",
" tapsn=IPNetwork(\"172.16.0.0/16\")):\n",
" self.host = host\n",
" self.username = username\n",
" self.shell = None \n",
" self.shell_type = shell_type \n",
"\n",
" # Assume that the admin \n",
" self.tap_host = tapsn[1]\n",
" self.tap_ip = tapsn[2]\n",
" self.NETKIT_PROMPT = NETKIT_PROMPT\n",
" \n",
" #TODO configure these\n",
" self.tap_hostname = \"taptunnelvm\"\n",
"\n",
" self.local_server = True\n",
" if self.host and self.username:\n",
" # Host and Username set, so ssh will be used\n",
" #TODO: make sure these are confirmed by the connect_server function\n",
" self.local_server = False\n",
" \n",
" #TODO: state machine maintained by the connecting functions\n",
" # Disconnected | Netkit | TapHost \n",
" \n",
" # use normal logger for logging? can we do this with pxssh?? \n",
" self.logfile = open( os.path.join(config.log_dir, \"pxssh.log\"), 'w')\n",
" \n",
" def get_shell(self): \n",
" \"\"\" Returns a shell connection to the Netkit server.\n",
" Connects server if no current connection.\n",
" Handles both remote (via SSH) and local server connections.\"\"\"\n",
" if self.shell: \n",
" # Already connected\n",
" return self.shell\n",
" else: \n",
" # Need to connect first\n",
" self.connect_to_server()\n",
" return self.shell\n",
" \n",
" def transfer_file(self, local_file):\n",
" \"\"\"Transfers file to remote host using SCP\"\"\"\n",
" # Sanity check\n",
" if self.local_server:\n",
" LOG.warn(\"Can only SCP to remote Netkit server\")\n",
" return\n",
"\n",
" child = pexpect.spawn(\"scp {0} {1}@{2}:.\".format(local_file,\n",
" self.username, self.host)) \n",
" child.logfile = self.logfile\n",
"\n",
" child.expect(pexpect.EOF) \n",
" LOG.debug( \"SCP result %s\"% child.before.strip())\n",
" return \n",
" \n",
" def connect_to_server(self): \n",
" \"\"\"Connects to Netkit server (if remote)\"\"\" \n",
" \n",
" #TODO: make internal (private) function\n",
" \n",
" #TODO: check state is disconnected\n",
" \n",
" # Connects to the Linux machine running the Netkit lab \n",
" shell = None \n",
" if self.host and self.username: \n",
" # Connect to remote machine\n",
"\n",
" ssh_link = self.shell\n",
" if ssh_link != None: \n",
" # ssh_link already set\n",
" return ssh_link\n",
"\n",
" shell = pxssh.pxssh() \n",
" shell.logfile = self.logfile\n",
" LOG.info( \"Connecting to {0}\".format(self.host) ) \n",
"\n",
" shell.login(self.host, self.username)\n",
" # with pass: shell.login(self.host, self.username, self.password)\n",
"\n",
" LOG.info( \"Connected to \" + self.host ) \n",
" #TODO: set state to Netkit\n",
" else: \n",
" shell = pexpect.spawn (self.shell_type) \n",
" shell.sendline(\"uname\")\n",
" \n",
" shell.logfile = self.logfile \n",
" shell.setecho(True) \n",
" # Check Linux machine (Netkit won't run on other Unixes) \n",
" i = shell.expect([\"Linux\", \"Darwin\", pexpect.EOF, NETKIT_PROMPT]) \n",
" if i == 0:\n",
" # Machine is running Linux. Send test command (ignore result)\n",
" shell.sendline(\"ls\") \n",
" elif i == 1:\n",
" LOG.warn(\"Specified Netkit host is running Mac OS X, \"\n",
" \"please specify a Linux Netkit host.\")\n",
" return None \n",
" else:\n",
" LOG.warn(\"Provided Netkit host is not running Linux\")\n",
"\n",
" self.shell = shell \n",
" return\n",
"\n",
"\n",
" def check_nk_installed(self): \n",
" \"\"\"Checks that Netkit is installed for given user\"\"\"\n",
" \n",
" LOG.debug(\"Checking Netkit installed\")\n",
" \n",
" #Check length of netkit env var is nonzero\n",
" shell = self.shell\n",
" chk_cmd = 'hash lstart 2>&- && echo \"Present\" || echo >&2 \"Absent\"\\n'\n",
" shell.sendline(chk_cmd)\n",
" i = shell.expect ([\"Present\", \"Absent\"]) \n",
" if i == 0: \n",
" LOG.debug(\"Netkit env var present\")\n",
" # Netkit env var present, assume nk installed\n",
" return True\n",
" else:\n",
" LOG.debug(\"Netkit env not var present\")\n",
" return False\n",
"\n",
" \n",
" def check_tunnel(self): \n",
" \"\"\"Checks TAP tunnel is active\"\"\"\n",
" \n",
" LOG.debug(\"Checking tunnel\")\n",
" tap_hostname = self.tap_hostname \n",
" shell = self.shell \n",
" \n",
" taphost_started = False\n",
" #TODO: check can ping tap dest also\n",
"\n",
" shell.sendline(\"vlist\\n\") \n",
" \n",
" # Limit max lines to 1000\n",
" for dummy in range (0, 1000):\n",
" i = shell.expect ([\n",
" \"\\w+\\s+(\" + tap_hostname + \")\\s+\\d+\\s+\\d+\", # Match host\n",
" pexpect.EOF,\n",
" \"Total virtual machines\", # Last line of vlist output\n",
" \"vlist: not found\",\n",
" ])\n",
" if i == 0: \n",
" taphost_started = True \n",
" break\n",
" if i == 3:\n",
" LOG.warn(\"Unable to find vlist command\")\n",
" else: \n",
" # Reached end\n",
" # TODO: look at using this instead of infinite loop\n",
" # Throw exception if reached here \n",
" break \n",
" \n",
" # See if Tap host running \n",
" if taphost_started:\n",
" LOG.debug(\"Tap host machine active, tunnel should be up\")\n",
" #todo: ping tap host machine ie tap_host ip to check is active \n",
" return True \n",
" else:\n",
" LOG.info(\"Starting tap tunnel: please enter sudo password and type '^]' (Control and right square bracket)\"\n",
" \"to return to AutoNetkit\")\n",
" shell.sendline(\"vstart %s --con0=none --eth0=tap,%s,%s\" % ( self.tap_hostname, self.tap_host, self.tap_ip))\n",
" shell.expect(\"Running ==>\")\n",
" LOG.info(shell.after)\n",
" sys.stdout.flush()\n",
" shell.interact()\n",
"# Sendline in case user didn't have to sudo, and so didn't do anything\n",
" shell.sendline()\n",
"\n",
" return True\n",
" \n",
" def disconnect_vm(self, shell): \n",
" \"\"\" Disconnects from a Netkit virtual machine\"\"\"\n",
" shell.sendline(\"exit\")\n",
" shell.expect(\"Connection to \\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3} closed.\")\n",
" return\n",
"\n",
" def connect_vm(self, host, shell, username=\"root\", password=\"1234\"): \n",
" \"\"\" Connects to a Netkit virtual machine\"\"\"\n",
"#TODO: modify this to use own shell, to allow multithreading\n",
" \n",
" shell = self.get_shell()\n",
" #TODO: maintain state - eg if connected to netkit server or to\n",
" # a tap host\n",
" \n",
" #Used within a connection already established\n",
" #TODO fix handling if ssh to remote machine first (eg from mac),\n",
" # or already on host machine (eg on linux running netkit) \n",
" \n",
" #Connects using ssh, handling the different cases \n",
" #based on \n",
" #linux.byexamples.com/archives/346/python-how-to-access-ssh-with-pexpect\n",
" LOG.debug( \"Connecting to tap host {0}\".format(host) )\n",
" \n",
" ssh_newkey = 'Are you sure you want to continue connecting'\n",
" # my ssh command line \n",
" shell.sendline('ssh {0}@{1}'.format(username, host) )\n",
" \n",
" i = shell.expect([ssh_newkey, 'password:', pexpect.EOF, NETKIT_PROMPT]) \n",
" if i == 0:\n",
" LOG.debug(\"Accepting new SSH key\")\n",
" shell.sendline('yes')\n",
" i = shell.expect([ssh_newkey, 'password:', pexpect.EOF])\n",
" if i == 1:\n",
" LOG.debug( \"Giving password\")\n",
" shell.sendline(password)\n",
" shell.expect(NETKIT_PROMPT)\n",
" elif i == 2:\n",
" LOG.debug( \"Either got key or connection timed out\")\n",
" elif i == 3: \n",
" LOG.debug( \"Connected using authentication key\")\n",
" \n",
" LOG.debug(\"Connected to {0}\".format(host) )\n",
" \n",
" return\n",
" \n",
" \n"
] | [
0,
0.037037037037037035,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.16666666666666666,
0.09090909090909091,
0,
0,
0.03333333333333333,
0,
0,
0,
0.029850746268656716,
0,
0,
0.030303030303030304,
0,
0.018518518518518517,
0,
0,
0.041666666666666664,
0.25,
0.012658227848101266,
0.1,
0,
0.2,
0,
0.028985507246376812,
0,
0,
0,
0.02857142857142857,
0.02631578947368421,
0,
0.030303030303030304,
0,
0,
0,
0.07142857142857142,
0.03333333333333333,
0,
0,
0,
0,
0,
0.0125,
0,
0.1111111111111111,
0.014705882352941176,
0.023255813953488372,
0.1111111111111111,
0.014285714285714285,
0.012987012987012988,
0.08333333333333333,
0.03571428571428571,
0,
0,
0,
0.027777777777777776,
0,
0,
0.04,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0.044444444444444446,
0,
0,
0.02857142857142857,
0.03389830508474576,
0.0625,
0.1111111111111111,
0.02857142857142857,
0.01818181818181818,
0.1111111111111111,
0.020833333333333332,
0.1111111111111111,
0.023255813953488372,
0.1111111111111111,
0.015151515151515152,
0.038461538461538464,
0.023809523809523808,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0.02631578947368421,
0,
0.046875,
0,
0,
0,
0,
0.05454545454545454,
0.02564102564102564,
0.058823529411764705,
0.03773584905660377,
0,
0.07692307692307693,
0.022222222222222223,
0.029411764705882353,
0.013888888888888888,
0.012658227848101266,
0,
0,
0.02631578947368421,
0,
0,
0.01694915254237288,
0.034482758620689655,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0.05555555555555555,
0,
0.1111111111111111,
0,
0.125,
0.0196078431372549,
0,
0,
0,
0.03773584905660377,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0.2,
0.06666666666666667,
0,
0.1111111111111111,
0,
0.046511627906976744,
0.023809523809523808,
0.030303030303030304,
0,
0.022727272727272728,
0,
0.02631578947368421,
0.1111111111111111,
0,
0.02631578947368421,
0.03125,
0.0958904109589041,
0,
0.014285714285714285,
0,
0,
0.04,
0.023255813953488372,
0,
0,
0,
0.043478260869565216,
0,
0,
0.017857142857142856,
0.017241379310344827,
0.1111111111111111,
0.027777777777777776,
0,
0,
0.02564102564102564,
0.012658227848101266,
0,
0.008333333333333333,
0.02127659574468085,
0.016666666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.02702702702702703,
0,
0,
0.05128205128205128,
0,
0,
0.013513513513513514,
0,
0.01639344262295082,
0.03225806451612903,
0,
0.014084507042253521,
0,
0.1111111111111111,
0.018518518518518517,
0.0136986301369863,
0.014285714285714285,
0.1111111111111111,
0.03389830508474576,
0.10526315789473684,
0.024691358024691357,
0.031746031746031744,
0.1111111111111111,
0,
0.027777777777777776,
0.016129032258064516,
0.125,
0.012345679012345678,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0.015384615384615385,
0.041666666666666664,
0.01639344262295082,
0.1111111111111111,
0.018867924528301886,
0.1111111111111111,
0,
0.2,
0.08333333333333333
] | 263 | 0.024737 |
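A minimal sketch, not part of the dataset row above, of the same pexpect expect/sendline login pattern that connect_vm() uses; it assumes pexpect is installed, and the host, username, password and prompt values are placeholders.

import pexpect

NETKIT_PROMPT = "~#"   # prompt expected after a successful login (assumption)

def ssh_login(host, username, password, prompt=NETKIT_PROMPT):
    """Spawn ssh and answer the host-key and password prompts, as in connect_vm()."""
    new_key = 'Are you sure you want to continue connecting'
    child = pexpect.spawn('ssh {0}@{1}'.format(username, host))
    i = child.expect([new_key, 'password:', pexpect.EOF, prompt])
    if i == 0:
        # Unknown host key: accept it and wait for the next prompt
        child.sendline('yes')
        i = child.expect([new_key, 'password:', pexpect.EOF, prompt])
    if i == 1:
        # Password authentication
        child.sendline(password)
        child.expect(prompt)
    elif i == 2:
        raise RuntimeError('SSH connection failed (EOF before prompt)')
    # i == 3 means key-based authentication already landed us at the prompt
    return child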
import argparse
import cPickle as pickle
import nltk
import string
import os
import numpy as np
import re
import scipy.sparse as ss
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
def tokenize(text):
tokens = nltk.word_tokenize(text)
stems = stem_tokens(tokens, stemmer)
return stems
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def main():
parser = argparse.ArgumentParser(description="Parse texts from comments");
    parser.add_argument('-i', '--input', required=True, help="The input file where each line starts with an item id and its textual content, separated by the splitter");
parser.add_argument('-o', '--output', required=True, help="The output path");
    parser.add_argument('-s', '--split', default="::", help="The splitter");
parser.add_argument('-n', '--number', type=int, default=8000, help="The number of words to be used in the vectorization");
args = parser.parse_args();
# Initialize the parameters
dictPath = os.path.join(args.output, 'dict.csv');
mPath = os.path.join(args.output, 'multi.dat');
nPath = os.path.join(args.output, 'mat.npy');
tPath = os.path.join(args.output, 'tfidf.npy');
itexts = dict();
replace_punctuation = string.maketrans(string.punctuation, ' '*len(string.punctuation))
# Read and process data from csv
for line in open(args.input):
terms = line.strip().split(args.split);
iid = int(terms[0]) - 1;
text = terms[1];
if iid not in itexts:
itexts[iid] = '';
no_punctuation = text.decode('utf8').encode('ascii','ignore').lower().translate(replace_punctuation);
no_punctuation = re.sub(r'\d+', '', no_punctuation);
no_punctuation = ' '.join( [w for w in no_punctuation.split() if len(w)>1] )
itexts[iid] += ' ' + no_punctuation;
# Stem and generate the word list
model = TfidfVectorizer(tokenizer=tokenize, stop_words='english', norm=None, use_idf=False);
tfs = model.fit_transform(itexts.values());
vocabulary = model.vocabulary_;
model = TfidfVectorizer(tokenizer=tokenize, stop_words='english', vocabulary=vocabulary, norm=None, use_idf=True);
model.fit(itexts.values());
idf = model.idf_;
wcounts = np.sum(tfs.toarray().astype(np.int32), axis=0);
wweights = dict();
for word in vocabulary:
wid = vocabulary[word];
wweights[word] = wcounts[wid] * idf[wid];
topwords = sorted(wweights, key=wweights.get, reverse=True)[0:args.number];
tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english', vocabulary=topwords, use_idf=False, norm=None);
tfs = tfidf.fit_transform(itexts.values()).toarray().astype(np.float32);
fid = open(mPath, 'w');
for i in range(len(itexts.keys())):
count = np.sum(tfs[i,:]!=0);
fid.write('%d'%count);
if count != 0:
for j in range(args.number):
if tfs[i,j] != 0:
fid.write(' %d:%d'%(j, int(tfs[i,j])));
fid.write('\n');
fid.close();
pickle.dump(ss.csc_matrix(tfs), open(nPath, 'w'));
tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english', vocabulary=topwords, use_idf=True, norm='l2');
tfs = tfidf.fit_transform(itexts.values()).toarray().astype(np.float32);
pickle.dump(ss.csc_matrix(tfs), open(tPath, 'w'));
fid = open(dictPath, 'w');
for i in range(len(topwords)):
fid.write(topwords[i] + '\n');
fid.close();
if __name__ == '__main__':
main();
| [
"import argparse\n",
"import cPickle as pickle\n",
"import nltk\n",
"import string\n",
"import os\n",
"import numpy as np\n",
"import re\n",
"import scipy.sparse as ss\n",
"\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"from nltk.stem.porter import PorterStemmer\n",
"\n",
"stemmer = PorterStemmer()\n",
"\n",
"def tokenize(text):\n",
" tokens = nltk.word_tokenize(text)\n",
" stems = stem_tokens(tokens, stemmer)\n",
" return stems\n",
"\n",
"def stem_tokens(tokens, stemmer):\n",
" stemmed = []\n",
" for item in tokens:\n",
" stemmed.append(stemmer.stem(item))\n",
" return stemmed\n",
"\n",
"def main():\n",
" parser = argparse.ArgumentParser(description=\"Parse texts from comments\");\n",
" parser.add_argument('-i', '--input', required=True, help=\"The input file where each line starts with a item id and its textual content, separated by the spliter\");\n",
" parser.add_argument('-o', '--output', required=True, help=\"The output path\");\n",
" parser.add_argument('-s', '--split', default=\"::\", help=\"The spliter\");\n",
" parser.add_argument('-n', '--number', type=int, default=8000, help=\"The number of words to be used in the vectorization\");\n",
" args = parser.parse_args();\n",
" # Initialize the parameters\n",
" dictPath = os.path.join(args.output, 'dict.csv');\n",
" mPath = os.path.join(args.output, 'multi.dat');\n",
" nPath = os.path.join(args.output, 'mat.npy');\n",
" tPath = os.path.join(args.output, 'tfidf.npy');\n",
" itexts = dict();\n",
" replace_punctuation = string.maketrans(string.punctuation, ' '*len(string.punctuation))\n",
" # Read and process data from csv\n",
" for line in open(args.input):\n",
" terms = line.strip().split(args.split);\n",
" iid = int(terms[0]) - 1;\n",
" text = terms[1];\n",
" if iid not in itexts:\n",
" itexts[iid] = '';\n",
" no_punctuation = text.decode('utf8').encode('ascii','ignore').lower().translate(replace_punctuation);\n",
" no_punctuation = re.sub(r'\\d+', '', no_punctuation);\n",
" no_punctuation = ' '.join( [w for w in no_punctuation.split() if len(w)>1] )\n",
" itexts[iid] += ' ' + no_punctuation;\n",
" # Stem and generate the word list\n",
" model = TfidfVectorizer(tokenizer=tokenize, stop_words='english', norm=None, use_idf=False);\n",
" tfs = model.fit_transform(itexts.values());\n",
" vocabulary = model.vocabulary_;\n",
" model = TfidfVectorizer(tokenizer=tokenize, stop_words='english', vocabulary=vocabulary, norm=None, use_idf=True);\n",
" model.fit(itexts.values());\n",
" idf = model.idf_;\n",
" wcounts = np.sum(tfs.toarray().astype(np.int32), axis=0);\n",
" wweights = dict();\n",
" for word in vocabulary:\n",
" wid = vocabulary[word];\n",
" wweights[word] = wcounts[wid] * idf[wid];\n",
" topwords = sorted(wweights, key=wweights.get, reverse=True)[0:args.number];\n",
" tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english', vocabulary=topwords, use_idf=False, norm=None);\n",
" tfs = tfidf.fit_transform(itexts.values()).toarray().astype(np.float32);\n",
" fid = open(mPath, 'w');\n",
" for i in range(len(itexts.keys())):\n",
" count = np.sum(tfs[i,:]!=0);\n",
" fid.write('%d'%count);\n",
" if count != 0:\n",
" for j in range(args.number):\n",
" if tfs[i,j] != 0:\n",
" fid.write(' %d:%d'%(j, int(tfs[i,j])));\n",
" fid.write('\\n');\n",
" fid.close();\n",
" pickle.dump(ss.csc_matrix(tfs), open(nPath, 'w'));\n",
" tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english', vocabulary=topwords, use_idf=True, norm='l2');\n",
" tfs = tfidf.fit_transform(itexts.values()).toarray().astype(np.float32);\n",
" pickle.dump(ss.csc_matrix(tfs), open(tPath, 'w'));\n",
" fid = open(dictPath, 'w');\n",
" for i in range(len(topwords)):\n",
" fid.write(topwords[i] + '\\n');\n",
" fid.close();\n",
"\n",
"if __name__ == '__main__':\n",
" main();\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0.08333333333333333,
0.012658227848101266,
0.011904761904761904,
0.024390243902439025,
0.013157894736842105,
0.015748031496062992,
0.03125,
0,
0.018518518518518517,
0.019230769230769232,
0.02,
0.019230769230769232,
0.047619047619047616,
0.010869565217391304,
0,
0,
0.020833333333333332,
0.05555555555555555,
0.07407407407407407,
0,
0.03333333333333333,
0.02727272727272727,
0.01639344262295082,
0.047058823529411764,
0.022222222222222223,
0,
0.020618556701030927,
0.04,
0.027777777777777776,
0.01680672268907563,
0.03125,
0.08333333333333333,
0.031746031746031744,
0.043478260869565216,
0,
0.03125,
0.02,
0.0125,
0.01694915254237288,
0.012987012987012988,
0.03571428571428571,
0,
0.08108108108108109,
0.06451612903225806,
0,
0,
0.029411764705882353,
0.05,
0.04,
0.058823529411764705,
0.01818181818181818,
0.017094017094017096,
0.012987012987012988,
0.01818181818181818,
0.03225806451612903,
0,
0.02564102564102564,
0.058823529411764705,
0,
0.037037037037037035,
0.08333333333333333
] | 86 | 0.020417 |
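For reference alongside the vectorization script in the row above, a small self-contained sketch of a Porter-stemming tokenizer passed to TfidfVectorizer; the toy corpus is invented and nltk's 'punkt' tokenizer data is assumed to be downloaded.

import nltk
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer

stemmer = PorterStemmer()

def tokenize(text):
    # word-tokenize, then Porter-stem each token (same idea as tokenize()/stem_tokens() above)
    return [stemmer.stem(token) for token in nltk.word_tokenize(text)]

docs = ["the cats were running fast", "a cat runs across the road"]   # toy corpus
model = TfidfVectorizer(tokenizer=tokenize, stop_words='english', use_idf=False, norm=None)
counts = model.fit_transform(docs)        # raw stemmed term counts, one row per document
print(sorted(model.vocabulary_))          # stemmed vocabulary with stop words removed
print(counts.toarray())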
# Copyright (C) 2011 Equinor ASA, Norway.
#
# The file 'ecl_subsidence.py' is part of ERT - Ensemble based
# Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Calculate dynamic change in seafloor subsidence.
The ecl_subsidence module contains functionality to load time-lapse ECLIPSE
results and calculate the change in seafloor subsidence between the
different surveys. The implementation is a thin wrapper around the
ecl_subsidence.c implementation in the libecl library.
"""
from cwrap import BaseCClass
from ecl import EclPrototype
from ecl.util.util import monkey_the_camel
import ecl.grid
class EclSubsidence(BaseCClass):
"""
Holding ECLIPSE results for calculating subsidence changes.
The EclSubsidence class is a collection class holding the results from
ECLIPSE forward modelling of subsidence surveys. Observe that the
class is focused on the ECLIPSE side of things, and does not have
any notion of observed values or measurement locations; that
should be handled by the scope using the EclSubsidence class.
Typical use of the EclSubsidence class involves the following steps:
1. Create the EclSubsidence instance.
2. Add surveys with the add_survey_XXXX() methods.
    3. Evaluate the subsidence response with the eval() method.
"""
TYPE_NAME = "ecl_subsidence"
_alloc = EclPrototype("void* ecl_subsidence_alloc( ecl_grid , ecl_file )" , bind = False)
_free = EclPrototype("void ecl_subsidence_free( ecl_subsidence )")
_add_survey_PRESSURE = EclPrototype("void* ecl_subsidence_add_survey_PRESSURE( ecl_subsidence , char* , ecl_file_view )")
_eval = EclPrototype("double ecl_subsidence_eval( ecl_subsidence , char* , char* , ecl_region , double , double , double, double, double)")
_eval_geertsma = EclPrototype("double ecl_subsidence_eval_geertsma( ecl_subsidence , char* , char* , ecl_region , double , double , double, double, double, double)")
_eval_geertsma_rporv = EclPrototype("double ecl_subsidence_eval_geertsma_rporv( ecl_subsidence , char* , char* , ecl_region , double , double , double, double, double, double)")
_has_survey = EclPrototype("bool ecl_subsidence_has_survey( ecl_subsidence , char*)")
def __init__( self, grid, init_file ):
"""
Creates a new EclSubsidence instance.
The input arguments @grid and @init_file should be instances
of EclGrid and EclFile respectively.
"""
self.init_file = init_file # Inhibit premature garbage collection of init_file
c_ptr = self._alloc( grid , init_file )
super( EclSubsidence , self ).__init__( c_ptr )
def __contains__(self , survey_name):
return self._has_survey( survey_name )
def add_survey_PRESSURE( self, survey_name, restart_file ):
"""
Add new survey based on PRESSURE keyword.
Add a new survey; in this context a survey is the state of
reservoir, i.e. an ECLIPSE restart file. The @survey_name
        input argument will be used when referring to this survey at a
later stage. The @restart_file input argument should be an
EclFile instance with data from one report step. A typical way
to load the @restart_file argument is:
import datetime
import ecl.ecl.ecl as ecl
...
...
date = datetime.datetime( year , month , day )
restart_file1 = ecl.EclFile.restart_block( "ECLIPSE.UNRST" , dtime = date)
restart_file2 = ecl.EclFile.restart_block( "ECLIPSE.UNRST" , report_step = 67 )
The pore volume is calculated from the initial pore volume and
the PRESSURE keyword from the restart file.
"""
self._add_survey_PRESSURE( survey_name, restart_file)
def eval_geertsma(self, base_survey, monitor_survey, pos, youngs_modulus, poisson_ratio, seabed, region=None):
if not base_survey in self:
raise KeyError("No such survey: %s" % base_survey)
if monitor_survey is not None:
if not monitor_survey in self:
raise KeyError("No such survey: %s" % monitor_survey)
return self._eval_geertsma(base_survey, monitor_survey, region, pos[0], pos[1], pos[2], youngs_modulus, poisson_ratio, seabed)
def eval_geertsma_rporv(self, base_survey, monitor_survey, pos, youngs_modulus, poisson_ratio, seabed, region=None):
if not base_survey in self:
raise KeyError("No such survey: %s" % base_survey)
if monitor_survey is not None:
if not monitor_survey in self:
raise KeyError("No such survey: %s" % monitor_survey)
return self._eval_geertsma_rporv(base_survey, monitor_survey, region, pos[0], pos[1], pos[2], youngs_modulus, poisson_ratio, seabed)
def eval(self, base_survey, monitor_survey, pos, compressibility, poisson_ratio, region=None):
"""
Calculates the subsidence change between two surveys.
This is the method everything is leading up to; will calculate
the change in subsidence, in centimeters,
between the two surveys named @base_survey and
@monitor_survey.
The monitor survey can be 'None' - the resulting answer has
        nothing whatsoever to do with subsidence, but can be
interesting to determine the numerical size of the quantities
which are subtracted in a 4D study.
The @pos argument should be a tuple of three elements with the
(utm_x , utm_y , depth) position where we want to evaluate the
change in subsidence.
If supplied the optional argument @region should be an
EclRegion() instance; this region will be used to limit the
        part of the reservoir included in the subsidence calculations.
The argument @compressibility is the total reservoir compressibility.
"""
if not base_survey in self:
raise KeyError("No such survey: %s" % base_survey)
if not monitor_survey in self:
raise KeyError("No such survey: %s" % monitor_survey)
return self._eval(base_survey, monitor_survey, region, pos[0], pos[1], pos[2], compressibility,poisson_ratio)
def free(self):
self._free( )
monkey_the_camel(EclSubsidence, 'evalGeertsma', EclSubsidence.eval_geertsma)
| [
"# Copyright (C) 2011 Equinor ASA, Norway.\n",
"#\n",
"# The file 'ecl_subsidence.py' is part of ERT - Ensemble based\n",
"# Reservoir Tool.\n",
"#\n",
"# ERT is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# ERT is distributed in the hope that it will be useful, but WITHOUT ANY\n",
"# WARRANTY; without even the implied warranty of MERCHANTABILITY or\n",
"# FITNESS FOR A PARTICULAR PURPOSE.\n",
"#\n",
"# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>\n",
"# for more details.\n",
"\"\"\"\n",
"Calculate dynamic change in gravitational strength.\n",
"\n",
"The ecl_subsidence module contains functionality to load time-lapse ECLIPSE\n",
"results and calculate the change in seafloor subsidence between the\n",
"different surveys. The implementation is a thin wrapper around the\n",
"ecl_subsidence.c implementation in the libecl library.\n",
"\"\"\"\n",
"from cwrap import BaseCClass\n",
"from ecl import EclPrototype\n",
"from ecl.util.util import monkey_the_camel\n",
"import ecl.grid\n",
"\n",
"class EclSubsidence(BaseCClass):\n",
" \"\"\"\n",
" Holding ECLIPSE results for calculating subsidence changes.\n",
"\n",
" The EclSubsidence class is a collection class holding the results from\n",
" ECLIPSE forward modelling of subsidence surveys. Observe that the\n",
" class is focused on the ECLIPSE side of things, and does not have\n",
" any notion of observed values or measurement locations; that\n",
" should be handled by the scope using the EclSubsidence class.\n",
"\n",
" Typical use of the EclSubsidence class involves the following steps:\n",
"\n",
" 1. Create the EclSubsidence instance.\n",
" 2. Add surveys with the add_survey_XXXX() methods.\n",
" 3. Evalute the subsidence response with the eval() method.\n",
" \"\"\"\n",
" TYPE_NAME = \"ecl_subsidence\"\n",
" _alloc = EclPrototype(\"void* ecl_subsidence_alloc( ecl_grid , ecl_file )\" , bind = False)\n",
" _free = EclPrototype(\"void ecl_subsidence_free( ecl_subsidence )\")\n",
" _add_survey_PRESSURE = EclPrototype(\"void* ecl_subsidence_add_survey_PRESSURE( ecl_subsidence , char* , ecl_file_view )\")\n",
" _eval = EclPrototype(\"double ecl_subsidence_eval( ecl_subsidence , char* , char* , ecl_region , double , double , double, double, double)\")\n",
" _eval_geertsma = EclPrototype(\"double ecl_subsidence_eval_geertsma( ecl_subsidence , char* , char* , ecl_region , double , double , double, double, double, double)\")\n",
" _eval_geertsma_rporv = EclPrototype(\"double ecl_subsidence_eval_geertsma_rporv( ecl_subsidence , char* , char* , ecl_region , double , double , double, double, double, double)\")\n",
" _has_survey = EclPrototype(\"bool ecl_subsidence_has_survey( ecl_subsidence , char*)\")\n",
"\n",
" def __init__( self, grid, init_file ):\n",
" \"\"\"\n",
" Creates a new EclSubsidence instance.\n",
"\n",
" The input arguments @grid and @init_file should be instances\n",
" of EclGrid and EclFile respectively.\n",
" \"\"\"\n",
" self.init_file = init_file # Inhibit premature garbage collection of init_file\n",
" c_ptr = self._alloc( grid , init_file )\n",
" super( EclSubsidence , self ).__init__( c_ptr )\n",
"\n",
"\n",
" def __contains__(self , survey_name):\n",
" return self._has_survey( survey_name )\n",
"\n",
"\n",
"\n",
" def add_survey_PRESSURE( self, survey_name, restart_file ):\n",
" \"\"\"\n",
" Add new survey based on PRESSURE keyword.\n",
"\n",
" Add a new survey; in this context a survey is the state of\n",
" reservoir, i.e. an ECLIPSE restart file. The @survey_name\n",
" input argument will be used when refering to this survey at a\n",
" later stage. The @restart_file input argument should be an\n",
" EclFile instance with data from one report step. A typical way\n",
" to load the @restart_file argument is:\n",
"\n",
" import datetime\n",
" import ecl.ecl.ecl as ecl\n",
" ...\n",
" ...\n",
" date = datetime.datetime( year , month , day )\n",
" restart_file1 = ecl.EclFile.restart_block( \"ECLIPSE.UNRST\" , dtime = date)\n",
" restart_file2 = ecl.EclFile.restart_block( \"ECLIPSE.UNRST\" , report_step = 67 )\n",
"\n",
" The pore volume is calculated from the initial pore volume and\n",
" the PRESSURE keyword from the restart file.\n",
" \"\"\"\n",
" self._add_survey_PRESSURE( survey_name, restart_file)\n",
"\n",
"\n",
" def eval_geertsma(self, base_survey, monitor_survey, pos, youngs_modulus, poisson_ratio, seabed, region=None):\n",
" if not base_survey in self:\n",
" raise KeyError(\"No such survey: %s\" % base_survey)\n",
"\n",
" if monitor_survey is not None:\n",
" if not monitor_survey in self:\n",
" raise KeyError(\"No such survey: %s\" % monitor_survey)\n",
"\n",
" return self._eval_geertsma(base_survey, monitor_survey, region, pos[0], pos[1], pos[2], youngs_modulus, poisson_ratio, seabed)\n",
"\n",
" def eval_geertsma_rporv(self, base_survey, monitor_survey, pos, youngs_modulus, poisson_ratio, seabed, region=None):\n",
" if not base_survey in self:\n",
" raise KeyError(\"No such survey: %s\" % base_survey)\n",
"\n",
" if monitor_survey is not None:\n",
" if not monitor_survey in self:\n",
" raise KeyError(\"No such survey: %s\" % monitor_survey)\n",
"\n",
" return self._eval_geertsma_rporv(base_survey, monitor_survey, region, pos[0], pos[1], pos[2], youngs_modulus, poisson_ratio, seabed)\n",
"\n",
"\n",
" def eval(self, base_survey, monitor_survey, pos, compressibility, poisson_ratio, region=None):\n",
" \"\"\"\n",
" Calculates the subsidence change between two surveys.\n",
"\n",
" This is the method everything is leading up to; will calculate\n",
" the change in subsidence, in centimeters,\n",
" between the two surveys named @base_survey and\n",
" @monitor_survey.\n",
"\n",
" The monitor survey can be 'None' - the resulting answer has\n",
" nothing whatsovever to do with subsidence, but can be\n",
" interesting to determine the numerical size of the quantities\n",
" which are subtracted in a 4D study.\n",
"\n",
" The @pos argument should be a tuple of three elements with the\n",
" (utm_x , utm_y , depth) position where we want to evaluate the\n",
" change in subsidence.\n",
"\n",
" If supplied the optional argument @region should be an\n",
" EclRegion() instance; this region will be used to limit the\n",
" part of the reserviour included in the subsidence calculations.\n",
"\n",
" The argument @compressibility is the total reservoir compressibility.\n",
" \"\"\"\n",
" if not base_survey in self:\n",
" raise KeyError(\"No such survey: %s\" % base_survey)\n",
"\n",
" if not monitor_survey in self:\n",
" raise KeyError(\"No such survey: %s\" % monitor_survey)\n",
"\n",
" return self._eval(base_survey, monitor_survey, region, pos[0], pos[1], pos[2], compressibility,poisson_ratio)\n",
"\n",
"\n",
"\n",
" def free(self):\n",
" self._free( )\n",
"\n",
"\n",
"monkey_the_camel(EclSubsidence, 'evalGeertsma', EclSubsidence.eval_geertsma)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.046296296296296294,
0.023255813953488372,
0.007874015748031496,
0.012578616352201259,
0.011363636363636364,
0.005494505494505495,
0.02,
0,
0.046511627906976744,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0.0625,
0.08928571428571429,
0,
0,
0.047619047619047616,
0.0425531914893617,
0,
0,
0,
0.046875,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.01098901098901099,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0.017391304347826087,
0.027777777777777776,
0,
0,
0,
0.023255813953488372,
0,
0,
0.007407407407407408,
0,
0.008264462809917356,
0.027777777777777776,
0,
0,
0,
0.023255813953488372,
0,
0,
0.0070921985815602835,
0,
0,
0.020202020202020204,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0.02564102564102564,
0,
0,
0.01694915254237288,
0,
0,
0,
0.05,
0.045454545454545456,
0,
0,
0
] | 156 | 0.005582 |
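A hypothetical usage sketch pieced together from the EclSubsidence docstrings in the row above; the ECLIPSE file names, survey names, numeric values and import paths are assumptions, not taken from the dataset, and running it requires the ecl package plus real ECLIPSE output.

from ecl.grid import EclGrid          # import paths assumed; adjust to the ecl version in use
from ecl.eclfile import EclFile
from ecl.gravimetry import EclSubsidence

grid = EclGrid("ECLIPSE.EGRID")
init_file = EclFile("ECLIPSE.INIT")
subsidence = EclSubsidence(grid, init_file)                       # step 1: create the instance

restart_base = EclFile.restart_block("ECLIPSE.UNRST", report_step=10)
restart_monitor = EclFile.restart_block("ECLIPSE.UNRST", report_step=67)
subsidence.add_survey_PRESSURE("base", restart_base)              # step 2: add surveys
subsidence.add_survey_PRESSURE("monitor", restart_monitor)

pos = (450000.0, 7000000.0, 0.0)                                  # (utm_x, utm_y, depth)
dz = subsidence.eval("base", "monitor", pos,                      # step 3: evaluate subsidence
                     compressibility=1.0e-5, poisson_ratio=0.25)
print(dz)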
# -*- coding: utf-8 -*-
import codecs
import os
import re
from setuptools import setup
script_dir = os.path.dirname(os.path.abspath(__file__))
def read_text_file(path):
with codecs.open(path, 'r', 'utf-8') as f:
return f.read()
def find_version(*path):
contents = read_text_file(os.path.join(script_dir, *path))
# The version line must have the form
# version_info = (X, Y, Z)
m = re.search(
r'^version_info\s*=\s*\(\s*(?P<v0>\d+)\s*,\s*(?P<v1>\d+)\s*,\s*(?P<v2>\d+)\s*\)\s*$',
contents,
re.MULTILINE,
)
if m:
return '%s.%s.%s' % (m.group('v0'), m.group('v1'), m.group('v2'))
raise RuntimeError('Unable to determine package version.')
setup(
name='py-flags',
version=find_version('src', 'flags.py'),
description='Type-safe (bit)flags for python 3',
long_description=read_text_file(os.path.join(script_dir, 'README.rst')),
keywords='flags bit flag set bitfield bool arithmetic',
url='https://github.com/pasztorpisti/py-flags',
author='István Pásztor',
author_email='[email protected]',
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
install_requires=['dictionaries==0.0.2'],
py_modules=['flags'],
package_dir={'': 'src'},
test_suite='tests',
)
| [
"# -*- coding: utf-8 -*-\n",
"import codecs\n",
"import os\n",
"import re\n",
"\n",
"from setuptools import setup\n",
"\n",
"\n",
"script_dir = os.path.dirname(os.path.abspath(__file__))\n",
"\n",
"\n",
"def read_text_file(path):\n",
" with codecs.open(path, 'r', 'utf-8') as f:\n",
" return f.read()\n",
"\n",
"\n",
"def find_version(*path):\n",
" contents = read_text_file(os.path.join(script_dir, *path))\n",
"\n",
" # The version line must have the form\n",
" # version_info = (X, Y, Z)\n",
" m = re.search(\n",
" r'^version_info\\s*=\\s*\\(\\s*(?P<v0>\\d+)\\s*,\\s*(?P<v1>\\d+)\\s*,\\s*(?P<v2>\\d+)\\s*\\)\\s*$',\n",
" contents,\n",
" re.MULTILINE,\n",
" )\n",
" if m:\n",
" return '%s.%s.%s' % (m.group('v0'), m.group('v1'), m.group('v2'))\n",
" raise RuntimeError('Unable to determine package version.')\n",
"\n",
"\n",
"setup(\n",
" name='py-flags',\n",
" version=find_version('src', 'flags.py'),\n",
" description='Type-safe (bit)flags for python 3',\n",
" long_description=read_text_file(os.path.join(script_dir, 'README.rst')),\n",
" keywords='flags bit flag set bitfield bool arithmetic',\n",
"\n",
" url='https://github.com/pasztorpisti/py-flags',\n",
"\n",
" author='István Pásztor',\n",
" author_email='[email protected]',\n",
"\n",
" license='MIT',\n",
"\n",
" classifiers=[\n",
" 'License :: OSI Approved :: MIT License',\n",
"\n",
" 'Development Status :: 5 - Production/Stable',\n",
" 'Intended Audience :: Developers',\n",
" 'Topic :: Software Development :: Libraries :: Python Modules',\n",
"\n",
" 'Programming Language :: Python :: 3',\n",
" 'Programming Language :: Python :: Implementation :: CPython',\n",
" 'Programming Language :: Python :: Implementation :: PyPy',\n",
" ],\n",
"\n",
" install_requires=['dictionaries==0.0.2'],\n",
" py_modules=['flags'],\n",
" package_dir={'': 'src'},\n",
"\n",
" test_suite='tests',\n",
")\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 63 | 0.000169 |
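To illustrate the version_info regular expression that find_version() relies on in the setup script above, here is a self-contained check against an invented sample string.

import re

sample = "version_info = (1, 2, 3)\n"      # the line format find_version() looks for
m = re.search(
    r'^version_info\s*=\s*\(\s*(?P<v0>\d+)\s*,\s*(?P<v1>\d+)\s*,\s*(?P<v2>\d+)\s*\)\s*$',
    sample,
    re.MULTILINE,
)
print('%s.%s.%s' % (m.group('v0'), m.group('v1'), m.group('v2')))   # prints 1.2.3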
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import urllib2
class ProxyInfo:
def __init__(self, proxy, port, scheme="http", username="", password=""):
""" Retrieves a new ProxyInfo object
Arguments:
proxy: String - Name or IP of the Proxy server
port: Int - The port of the proxy server
Keyword Arguments:
scheme: String - [opt] The type of proxy (http is default)
            username: String - [opt] The username to use (if empty or omitted no
authentication is done.
password: String - [opt] The password to use.
"""
self.Proxy = proxy
self.Port = int(port)
self.Scheme = scheme
self.Username = username
self.Password = password
self.Filter = [] # : If specified, only URLs that contain these parts will be routed via the proxy.
def GetSmartProxyHandler(self, scheme=None):
""" Gets a Proxy Handler based on the settings
Keyword Arguments:
scheme : String - Can be used to override the scheme
"""
if self.Proxy == "":
proxyHandler = urllib2.ProxyHandler({})
else:
address = self.GetProxyAddress()
proxyHandler = urllib2.ProxyHandler({scheme or self.Scheme: address})
return proxyHandler
def GetProxyAddress(self, hidePassword=False):
""" Returns the proxy address for this proxy
Keyword Arguments:
hidePassword : Boolean - Should we show or hide the password
"""
if self.__IsSecure():
if hidePassword:
return "%s://%s:*******@%s:%s" % (self.Scheme, self.Username, self.Proxy, self.Port)
else:
return "%s://%s:%s@%s:%s" % (self.Scheme, self.Username, self.Password, self.Proxy, self.Port)
else:
return "%s://%s:%s" % (self.Scheme, self.Proxy, self.Port)
def UseProxyForUrl(self, url):
""" Checks whether the URL is allowed based on the proxy filter
Arguments:
url : String - The URL
"""
if not self.Filter:
return True
# if any word in the filterlist appears in the url, use the proxy
return any(f in url for f in self.Filter)
def __IsSecure(self):
""" An easy way of determining if this server should use proxy authentication."""
return not self.Username == ""
def __str__(self):
""" returns a string representation """
if self.Proxy == "":
return "Proxy Default Override."
return "Proxy (%s): %s" % (self.Scheme, self.GetProxyAddress(True))
| [
"#===============================================================================\r\n",
"# LICENSE XOT-Framework - CC BY-NC-ND\r\n",
"#===============================================================================\r\n",
"# This work is licenced under the Creative Commons\r\n",
"# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a\r\n",
"# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/\r\n",
"# or send a letter to Creative Commons, 171 Second Street, Suite 300,\r\n",
"# San Francisco, California 94105, USA.\r\n",
"#===============================================================================\r\n",
"\r\n",
"import urllib2\r\n",
"\r\n",
"\r\n",
"class ProxyInfo:\r\n",
" def __init__(self, proxy, port, scheme=\"http\", username=\"\", password=\"\"):\r\n",
" \"\"\" Retrieves a new ProxyInfo object\r\n",
"\r\n",
" Arguments:\r\n",
" proxy: String - Name or IP of the Proxy server\r\n",
" port: Int - The port of the proxy server\r\n",
"\r\n",
" Keyword Arguments:\r\n",
" scheme: String - [opt] The type of proxy (http is default)\r\n",
" username: String - [opt] The username to use (if empty or ommitted no\r\n",
" authentication is done.\r\n",
" password: String - [opt] The password to use.\r\n",
"\r\n",
" \"\"\"\r\n",
"\r\n",
" self.Proxy = proxy\r\n",
" self.Port = int(port)\r\n",
" self.Scheme = scheme\r\n",
" self.Username = username\r\n",
" self.Password = password\r\n",
" self.Filter = [] # : If specified, only URLs that contain these parts will be routed via the proxy.\r\n",
"\r\n",
" def GetSmartProxyHandler(self, scheme=None):\r\n",
" \"\"\" Gets a Proxy Handler based on the settings\r\n",
"\r\n",
" Keyword Arguments:\r\n",
" scheme : String - Can be used to override the scheme\r\n",
"\r\n",
" \"\"\"\r\n",
"\r\n",
" if self.Proxy == \"\":\r\n",
" proxyHandler = urllib2.ProxyHandler({})\r\n",
" else:\r\n",
" address = self.GetProxyAddress()\r\n",
" proxyHandler = urllib2.ProxyHandler({scheme or self.Scheme: address})\r\n",
"\r\n",
" return proxyHandler\r\n",
"\r\n",
" def GetProxyAddress(self, hidePassword=False):\r\n",
" \"\"\" Returns the proxy address for this proxy\r\n",
"\r\n",
" Keyword Arguments:\r\n",
" hidePassword : Boolean - Should we show or hide the password\r\n",
"\r\n",
" \"\"\"\r\n",
" if self.__IsSecure():\r\n",
" if hidePassword:\r\n",
" return \"%s://%s:*******@%s:%s\" % (self.Scheme, self.Username, self.Proxy, self.Port)\r\n",
" else:\r\n",
" return \"%s://%s:%s@%s:%s\" % (self.Scheme, self.Username, self.Password, self.Proxy, self.Port)\r\n",
" else:\r\n",
" return \"%s://%s:%s\" % (self.Scheme, self.Proxy, self.Port)\r\n",
"\r\n",
" def UseProxyForUrl(self, url):\r\n",
" \"\"\" Checks whether the URL is allowed based on the proxy filter\r\n",
"\r\n",
" Arguments:\r\n",
" url : String - The URL\r\n",
"\r\n",
" \"\"\"\r\n",
" if not self.Filter:\r\n",
" return True\r\n",
"\r\n",
" # if any word in the filterlist appears in the url, use the proxy\r\n",
" return any(f in url for f in self.Filter)\r\n",
"\r\n",
" def __IsSecure(self):\r\n",
" \"\"\" An easy way of determining if this server should use proxy authentication.\"\"\"\r\n",
"\r\n",
" return not self.Username == \"\"\r\n",
"\r\n",
" def __str__(self):\r\n",
" \"\"\" returns a string representation \"\"\"\r\n",
"\r\n",
" if self.Proxy == \"\":\r\n",
" return \"Proxy Default Override.\"\r\n",
"\r\n",
" return \"Proxy (%s): %s\" % (self.Scheme, self.GetProxyAddress(True))\r\n"
] | [
0.024390243902439025,
0,
0.024390243902439025,
0,
0.012195121951219513,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00980392156862745,
0,
0.008928571428571428,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 92 | 0.001472 |
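A short hypothetical sketch of how the ProxyInfo class above could be used with Python 2's urllib2; it assumes ProxyInfo is in scope, and the proxy address, credentials and URL are placeholders.

import urllib2

proxy = ProxyInfo("proxy.example.org", 3128, username="someuser", password="secret")
print(proxy.GetProxyAddress(hidePassword=True))      # scheme://user:*******@host:port

opener = urllib2.build_opener(proxy.GetSmartProxyHandler())
url = "http://example.com/"
if proxy.UseProxyForUrl(url):                        # True while the filter list is empty
    response = opener.open(url)
    print(response.getcode())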
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
# set this to run as a cronjob (after backup has completed)
# to regularly remove indexes
# .conf file will determine what indexes are operated on
# Create a starter .conf file with backupDiscover.py
import sys
from datetime import datetime, timedelta
from configlib import getConfig, OptionParser
from mozdef_util.utilities.logger import logger
from mozdef_util.utilities.toUTC import toUTC
from mozdef_util.elasticsearch_client import ElasticsearchClient
def esCloseIndices():
logger.debug('started')
try:
es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
indices = es.get_open_indices()
except Exception as e:
logger.error("Unhandled exception while connecting to ES, terminating: %r" % (e))
    # examine each index pulled from get_open_indices
# to determine if it meets aging criteria
month_ago_date = toUTC(datetime.now()) - timedelta(days=int(options.index_age))
month_ago_date = month_ago_date.replace(tzinfo=None)
for index in indices:
if 'events' in index:
index_date = index.rsplit('-', 1)[1]
logger.debug("Checking to see if Index: %s can be closed." % (index))
if len(index_date) == 8:
index_date_obj = datetime.strptime(index_date, '%Y%m%d')
try:
if month_ago_date > index_date_obj:
logger.debug("Index: %s will be closed." % (index))
es.close_index(index)
else:
logger.debug("Index: %s does not meet aging criteria and will not be closed." % (index))
except Exception as e:
logger.error("Unhandled exception while closing indices, terminating: %r" % (e))
def initConfig():
# output our log to stdout or syslog
options.output = getConfig(
'output',
'stdout',
options.configfile
)
# syslog hostname
options.sysloghostname = getConfig(
'sysloghostname',
'localhost',
options.configfile
)
options.syslogport = getConfig(
'syslogport',
514,
options.configfile
)
options.esservers = list(getConfig(
'esservers',
'http://localhost:9200',
options.configfile).split(',')
)
options.index_age = getConfig(
'index_age',
15,
options.configfile
)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c",
dest='configfile',
default=sys.argv[0].replace('.py', '.conf'),
help="configuration file to use")
(options, args) = parser.parse_args()
initConfig()
esCloseIndices()
| [
"#!/usr/bin/env python\n",
"\n",
"# This Source Code Form is subject to the terms of the Mozilla Public\n",
"# License, v. 2.0. If a copy of the MPL was not distributed with this\n",
"# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n",
"# Copyright (c) 2014 Mozilla Corporation\n",
"\n",
"# set this to run as a cronjob (after backup has completed)\n",
"# to regularly remove indexes\n",
"\n",
"# .conf file will determine what indexes are operated on\n",
"# Create a starter .conf file with backupDiscover.py\n",
"\n",
"import sys\n",
"from datetime import datetime, timedelta\n",
"from configlib import getConfig, OptionParser\n",
"\n",
"from mozdef_util.utilities.logger import logger\n",
"from mozdef_util.utilities.toUTC import toUTC\n",
"from mozdef_util.elasticsearch_client import ElasticsearchClient\n",
"\n",
"\n",
"def esCloseIndices():\n",
" logger.debug('started')\n",
" try:\n",
" es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))\n",
" indices = es.get_open_indices()\n",
" except Exception as e:\n",
" logger.error(\"Unhandled exception while connecting to ES, terminating: %r\" % (e))\n",
"\n",
" # examine each index pulled from get_indice\n",
" # to determine if it meets aging criteria\n",
" month_ago_date = toUTC(datetime.now()) - timedelta(days=int(options.index_age))\n",
" month_ago_date = month_ago_date.replace(tzinfo=None)\n",
" for index in indices:\n",
" if 'events' in index:\n",
" index_date = index.rsplit('-', 1)[1]\n",
" logger.debug(\"Checking to see if Index: %s can be closed.\" % (index))\n",
" if len(index_date) == 8:\n",
" index_date_obj = datetime.strptime(index_date, '%Y%m%d')\n",
" try:\n",
" if month_ago_date > index_date_obj:\n",
" logger.debug(\"Index: %s will be closed.\" % (index))\n",
" es.close_index(index)\n",
" else:\n",
" logger.debug(\"Index: %s does not meet aging criteria and will not be closed.\" % (index))\n",
" except Exception as e:\n",
" logger.error(\"Unhandled exception while closing indices, terminating: %r\" % (e))\n",
"\n",
"\n",
"def initConfig():\n",
" # output our log to stdout or syslog\n",
" options.output = getConfig(\n",
" 'output',\n",
" 'stdout',\n",
" options.configfile\n",
" )\n",
" # syslog hostname\n",
" options.sysloghostname = getConfig(\n",
" 'sysloghostname',\n",
" 'localhost',\n",
" options.configfile\n",
" )\n",
" options.syslogport = getConfig(\n",
" 'syslogport',\n",
" 514,\n",
" options.configfile\n",
" )\n",
" options.esservers = list(getConfig(\n",
" 'esservers',\n",
" 'http://localhost:9200',\n",
" options.configfile).split(',')\n",
" )\n",
" options.index_age = getConfig(\n",
" 'index_age',\n",
" 15,\n",
" options.configfile\n",
" )\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" parser = OptionParser()\n",
" parser.add_option(\"-c\",\n",
" dest='configfile',\n",
" default=sys.argv[0].replace('.py', '.conf'),\n",
" help=\"configuration file to use\")\n",
" (options, args) = parser.parse_args()\n",
" initConfig()\n",
" esCloseIndices()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0.011111111111111112,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0.008771929824561403,
0,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 89 | 0.000738 |
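A minimal self-contained sketch of just the index-aging test that esCloseIndices() applies above, without the Elasticsearch client or configuration plumbing; the index names and dates are invented.

from datetime import datetime, timedelta

def should_close(index, index_age_days=15, now=None):
    """Return True when an 'events-YYYYMMDD' index is older than index_age_days."""
    now = now or datetime.utcnow()
    date_part = index.rsplit('-', 1)[1]
    if len(date_part) != 8:
        return False
    index_date = datetime.strptime(date_part, '%Y%m%d')
    return (now - timedelta(days=index_age_days)) > index_date

print(should_close('events-20140101', now=datetime(2014, 2, 1)))   # True  (older than 15 days)
print(should_close('events-20140131', now=datetime(2014, 2, 1)))   # False (too recent)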
from BinarySearchTree import BinarySearchTree as _BinarySearchTree
_DEBUG = 0
def _identifyNephews(sibling, parent):
if sibling == parent.getLeft():
return sibling.getRight(),sibling.getLeft()
else:
return sibling.getLeft(),sibling.getRight()
class RedBlackTree(_BinarySearchTree):
"""A red-black tree for storing arbitrary (key,data) pairs.
This particular implementation relies upon storing all data at
leaves of the tree rather than at internal nodes.
Entries with equal keys will be stored consolidated at a single leaf.
"""
#####################################################################
class _Node(_BinarySearchTree._Node):
"""Structure for single node of tree.
Each node has a key, a left child and a right child.
        Data is stored at leaves and as a complete hack, we designate
this by having left and right pointers equal, and pointing to
the data.
"""
def __init__(self, key=None):
"""Creates a leaf with no data entries.
key is None if not otherwise specified.
color is initially black.
"""
_BinarySearchTree._Node.__init__(self,key) # parent constructor
self._black = True
def isBlack(self):
return self._black
def isRed(self):
return not self._black
def setBlack(self,yes=True):
self._black = yes
def __str__(self):
if self._black:
color = 'black'
else:
color = 'red'
return _BinarySearchTree._Node.__str__(self) + " and color %s"%color
#####################################################################
def _fixupInsert(self, path):
path.pop() # end should be a leaf (black by default)
if path:
path[-1].setBlack(False) # this presumed new internal should be set to red
good = False
while not good:
good = True # generally the case
if len(path) == 1:
path[-1].setBlack() # root should be black
else:
parent = path[-2]
if parent.isRed():
grandparent = path[-3] # must exist, since root is never red
uncle = grandparent.getOtherChild(parent)
if uncle.isBlack():
if _DEBUG>1: print "misshapen 4-node"
# poorly shaped 4-node
if (grandparent.getLeft() == parent) != (parent.getLeft() == path[-1]):
# crooked alignment requires extra rotation
if _DEBUG>1: print "extra rotate"
self._rotate(path[-1],parent,grandparent)
path[-2:] = [path[-1], path[-2]] # invert last two entries
if _DEBUG>1: print "extra rotate"
# in either case, need to rotate parent with grandparent
grandparent.setBlack(False)
path[-2].setBlack(True)
path[-1].setBlack(False)
if len(path)==3:
if _DEBUG>1: print "rotate (no grandparent)"
self._rotate(path[-2],path[-3])
else:
if _DEBUG>1: print "rotate"
self._rotate(path[-2],path[-3],path[-4])
else:
# 5-node must be recolored
if _DEBUG>1: print "recoloring 5-node"
parent.setBlack()
uncle.setBlack()
grandparent.setBlack(False)
# continue from grandparent
path.pop()
path.pop()
good = False
if _DEBUG>0 and self._validate() == -1:
print 'Error after insertion.'
def _removeLeaf(self, path):
"""Last node of path is a leaf that should be removed."""
problem = len(path) >= 2 and path[-2].isBlack()
_BinarySearchTree._contractAbove(self,path) # path is updated automatically
while problem:
problem = False # typically, we fix it. We'll reset to True when necessary
if path[-1].isRed():
path[-1].setBlack() # problem solved
elif len(path) >= 2:
# bottom node is a "double-black" that must be remedied
if _DEBUG>1: print "double-black node must be resolved:",path[-1]
parent = path[-2]
sibling = parent.getOtherChild(path[-1])
if len(path) >= 3:
grandparent = path[-3]
else:
grandparent = None
if sibling.isRed():
# our parent is a 3-node that we prefer to realign
if _DEBUG>1: print "realigning red sibling"
sibling.setBlack(True)
parent.setBlack(False)
self._rotate(sibling,parent,grandparent)
path.insert(-2,sibling) # reflects the rotation of sibling above parent
grandparent = sibling
sibling = parent.getOtherChild(path[-1]) # will surely be black this time
# now sibling is black
nephewA,nephewB = _identifyNephews(sibling,parent) # closer,farther
if _DEBUG>1: print "nephews:",nephewA,"-",nephewB
if nephewA.isBlack() and nephewB.isBlack():
# we and sibling are 2-nodes. Recolor sibling to enact merge
if _DEBUG>1: print "sibling also 2-node; recoloring"
sibling.setBlack(False)
if parent.isRed():
parent.setBlack()
else:
if _DEBUG>1: print "will continue with",path[-1]
path.pop()
problem = True # must continue from parent level
else:
# should be able to maneuver to borrow from sibling
if not nephewA.isRed():
# rotate other nephew and sibling
if _DEBUG>1: print "realigning nephews"
self._rotate(nephewB,sibling,parent)
nephewB.setBlack(True)
sibling.setBlack(False)
sibling = nephewB
nephewA,nephewB = _identifyNephews(sibling,parent)
if _DEBUG>1: print "nephews:",nephewA,"-",nephewB
# at this point, nephewA is guaranteed to be red. Let's borrow from it
self._rotate(nephewA,sibling,parent)
self._rotate(nephewA,parent,grandparent)
nephewA.setBlack(parent.isBlack()) # they've been promoted
parent.setBlack()
# cross your fingers; should be done!
if _DEBUG>0 and self._validate() == -1:
print 'Error after deletion.'
def _validate(self,here=None,prevBlack=True):
"""Returns the black depth if valid; -1 if invalid."""
if here is None:
here = self._root
if here is None:
answer = 0
elif here.isExternal():
if here.isRed():
answer = -1
else:
answer = 1
else:
if here.isRed() and not prevBlack:
answer = -1 # should not have two reds in a row
else:
leftDepth = self._validate(here.getLeft(),here.isBlack())
rightDepth = self._validate(here.getRight(),here.isBlack())
if leftDepth == -1 or rightDepth == -1 or leftDepth != rightDepth:
answer = -1
else:
if here.isBlack():
answer = 1 + leftDepth
else:
answer = leftDepth
return answer
if __name__ == '__main__':
from BinarySearchTree import _test
_test(RedBlackTree(),10000,_DEBUG)
| [
"from BinarySearchTree import BinarySearchTree as _BinarySearchTree\n",
"\n",
"_DEBUG = 0\n",
"\n",
"def _identifyNephews(sibling, parent):\n",
" if sibling == parent.getLeft():\n",
" return sibling.getRight(),sibling.getLeft()\n",
" else:\n",
" return sibling.getLeft(),sibling.getRight()\n",
"\n",
"\n",
"class RedBlackTree(_BinarySearchTree):\n",
" \"\"\"A red-black tree for storing arbitrary (key,data) pairs.\n",
"\n",
" This particular implementation relies upon storing all data at\n",
" leaves of the tree rather than at internal nodes.\n",
"\n",
" Entries with equal keys will be stored consolidated at a single leaf.\n",
" \"\"\"\n",
"\n",
" #####################################################################\n",
" class _Node(_BinarySearchTree._Node):\n",
" \"\"\"Structure for single node of tree.\n",
"\n",
" Each node has a key, a left child and a right child.\n",
"\n",
" Data is stored at leaves and as complete hack, we designate\n",
" this by having left and right pointers equal, and pointing to\n",
" the data.\n",
" \"\"\"\n",
" def __init__(self, key=None):\n",
" \"\"\"Creates a leaf with no data entries.\n",
"\n",
" key is None if not otherwise specified.\n",
" color is initially black.\n",
" \"\"\"\n",
" _BinarySearchTree._Node.__init__(self,key) # parent constructor\n",
" self._black = True\n",
"\n",
" def isBlack(self):\n",
" return self._black\n",
"\n",
" def isRed(self):\n",
" return not self._black\n",
"\n",
" def setBlack(self,yes=True):\n",
" self._black = yes\n",
"\n",
" def __str__(self):\n",
" if self._black:\n",
" color = 'black'\n",
" else:\n",
" color = 'red'\n",
" return _BinarySearchTree._Node.__str__(self) + \" and color %s\"%color\n",
" \n",
" #####################################################################\n",
"\n",
" \n",
" def _fixupInsert(self, path):\n",
" path.pop() # end should be a leaf (black by default)\n",
" if path:\n",
" path[-1].setBlack(False) # this presumed new internal should be set to red\n",
" good = False\n",
" while not good:\n",
" good = True # generally the case\n",
" if len(path) == 1:\n",
" path[-1].setBlack() # root should be black\n",
" else:\n",
" parent = path[-2]\n",
" if parent.isRed():\n",
" grandparent = path[-3] # must exist, since root is never red\n",
" uncle = grandparent.getOtherChild(parent)\n",
" if uncle.isBlack():\n",
" if _DEBUG>1: print \"misshapen 4-node\"\n",
" # poorly shaped 4-node\n",
" if (grandparent.getLeft() == parent) != (parent.getLeft() == path[-1]):\n",
" # crooked alignment requires extra rotation\n",
" if _DEBUG>1: print \"extra rotate\"\n",
" self._rotate(path[-1],parent,grandparent)\n",
" path[-2:] = [path[-1], path[-2]] # invert last two entries\n",
" if _DEBUG>1: print \"extra rotate\"\n",
" # in either case, need to rotate parent with grandparent\n",
" grandparent.setBlack(False)\n",
" path[-2].setBlack(True)\n",
" path[-1].setBlack(False)\n",
" if len(path)==3:\n",
" if _DEBUG>1: print \"rotate (no grandparent)\"\n",
" self._rotate(path[-2],path[-3])\n",
" else:\n",
" if _DEBUG>1: print \"rotate\"\n",
" self._rotate(path[-2],path[-3],path[-4])\n",
" else:\n",
" # 5-node must be recolored\n",
" if _DEBUG>1: print \"recoloring 5-node\"\n",
" parent.setBlack()\n",
" uncle.setBlack()\n",
" grandparent.setBlack(False)\n",
" # continue from grandparent\n",
" path.pop()\n",
" path.pop()\n",
" good = False\n",
"\n",
" if _DEBUG>0 and self._validate() == -1:\n",
" print 'Error after insertion.'\n",
"\n",
"\n",
" def _removeLeaf(self, path):\n",
" \"\"\"Last node of path is a leaf that should be removed.\"\"\"\n",
" problem = len(path) >= 2 and path[-2].isBlack()\n",
" _BinarySearchTree._contractAbove(self,path) # path is updated automatically\n",
" while problem:\n",
" problem = False # typically, we fix it. We'll reset to True when necessary\n",
" if path[-1].isRed():\n",
" path[-1].setBlack() # problem solved\n",
" elif len(path) >= 2:\n",
" # bottom node is a \"double-black\" that must be remedied\n",
" if _DEBUG>1: print \"double-black node must be resolved:\",path[-1]\n",
" parent = path[-2]\n",
" sibling = parent.getOtherChild(path[-1])\n",
" if len(path) >= 3:\n",
" grandparent = path[-3]\n",
" else:\n",
" grandparent = None\n",
" \n",
" if sibling.isRed():\n",
" # our parent is a 3-node that we prefer to realign\n",
" if _DEBUG>1: print \"realigning red sibling\"\n",
" sibling.setBlack(True)\n",
" parent.setBlack(False)\n",
" self._rotate(sibling,parent,grandparent)\n",
" path.insert(-2,sibling) # reflects the rotation of sibling above parent\n",
" grandparent = sibling\n",
" sibling = parent.getOtherChild(path[-1]) # will surely be black this time\n",
"\n",
" # now sibling is black\n",
" nephewA,nephewB = _identifyNephews(sibling,parent) # closer,farther\n",
" if _DEBUG>1: print \"nephews:\",nephewA,\"-\",nephewB\n",
" if nephewA.isBlack() and nephewB.isBlack():\n",
" # we and sibling are 2-nodes. Recolor sibling to enact merge\n",
" if _DEBUG>1: print \"sibling also 2-node; recoloring\"\n",
" sibling.setBlack(False)\n",
" if parent.isRed():\n",
" parent.setBlack()\n",
" else:\n",
" if _DEBUG>1: print \"will continue with\",path[-1]\n",
" path.pop()\n",
" problem = True # must continue from parent level\n",
" else:\n",
" # should be able to maneuver to borrow from sibling\n",
" if not nephewA.isRed():\n",
" # rotate other nephew and sibling\n",
" if _DEBUG>1: print \"realigning nephews\"\n",
" self._rotate(nephewB,sibling,parent)\n",
" nephewB.setBlack(True)\n",
" sibling.setBlack(False)\n",
" sibling = nephewB\n",
" nephewA,nephewB = _identifyNephews(sibling,parent)\n",
" if _DEBUG>1: print \"nephews:\",nephewA,\"-\",nephewB\n",
"\n",
" # at this point, nephewA is guaranteed to be red. Let's borrow from it\n",
" self._rotate(nephewA,sibling,parent)\n",
" self._rotate(nephewA,parent,grandparent)\n",
" nephewA.setBlack(parent.isBlack()) # they've been promoted\n",
" parent.setBlack()\n",
" # cross your fingers; should be done!\n",
"\n",
" if _DEBUG>0 and self._validate() == -1:\n",
" print 'Error after deletion.'\n",
"\n",
" def _validate(self,here=None,prevBlack=True):\n",
" \"\"\"Returns the black depth if valid; -1 if invalid.\"\"\"\n",
" if here is None:\n",
" here = self._root\n",
" if here is None:\n",
" answer = 0\n",
" elif here.isExternal():\n",
" if here.isRed():\n",
" answer = -1\n",
" else:\n",
" answer = 1\n",
" else:\n",
" if here.isRed() and not prevBlack:\n",
" answer = -1 # should not have two reds in a row\n",
" else:\n",
" leftDepth = self._validate(here.getLeft(),here.isBlack())\n",
" rightDepth = self._validate(here.getRight(),here.isBlack())\n",
" if leftDepth == -1 or rightDepth == -1 or leftDepth != rightDepth:\n",
" answer = -1\n",
" else:\n",
" if here.isBlack():\n",
" answer = 1 + leftDepth\n",
" else:\n",
" answer = leftDepth\n",
" return answer\n",
" \n",
"if __name__ == '__main__':\n",
" from BinarySearchTree import _test\n",
" _test(RedBlackTree(),10000,_DEBUG)\n",
" \n"
] | [
0,
0,
0,
0,
0.02564102564102564,
0,
0.019230769230769232,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0.1111111111111111,
0,
0,
0.2,
0.029411764705882353,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0.030303030303030304,
0,
0.01,
0,
0.030303030303030304,
0.02702702702702703,
0.010869565217391304,
0.030303030303030304,
0.011764705882352941,
0,
0,
0,
0.022222222222222223,
0.025974025974025976,
0.015625,
0,
0.03333333333333333,
0.0273972602739726,
0,
0,
0.029850746268656716,
0,
0,
0,
0,
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0,
0.030303030303030304,
0,
0,
0.02197802197802198,
0,
0.011363636363636364,
0,
0,
0,
0,
0.04878048780487805,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0.03125,
0,
0,
0.03278688524590164,
0.02127659574468085,
0,
0.010526315789473684,
0,
0,
0.03488372093023256,
0.07575757575757576,
0,
0.012195121951219513,
0.0273972602739726,
0,
0,
0,
0,
0.0410958904109589,
0,
0,
0,
0,
0,
0,
0.03125,
0.03278688524590164,
0,
0,
0,
0.02666666666666667,
0.06756756756756757,
0,
0.01098901098901099,
0.03508771929824561,
0.03278688524590164,
0.012345679012345678,
0,
0,
0,
0.020833333333333332,
0,
0,
0.04,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0.013513513513513514,
0.013157894736842105,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0.2,
0.037037037037037035,
0,
0.05128205128205128,
0.2
] | 199 | 0.010954 |
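As a side note, the black-height check in the _validate method above can be illustrated with a minimal standalone sketch; the Node class and black_height helper below are hypothetical stand-ins for illustration only, not part of the original RedBlackTree code.

# Minimal sketch of red-black validation (illustrative names, not the original class).
class Node:
    def __init__(self, key, black=True, left=None, right=None):
        self.key = key
        self.black = black
        self.left = left
        self.right = right

def black_height(node):
    """Return the black height of the subtree, or -1 if a red-black rule is broken."""
    if node is None:                      # an empty subtree counts as a black external node
        return 1
    left = black_height(node.left)
    right = black_height(node.right)
    if left == -1 or right == -1 or left != right:
        return -1                         # unequal black heights -> invalid
    if not node.black and ((node.left and not node.left.black) or
                           (node.right and not node.right.black)):
        return -1                         # a red node with a red child -> invalid
    return left + (1 if node.black else 0)

# Usage: a tiny valid tree (black root with two red children) has black height 2.
root = Node(2, black=True, left=Node(1, black=False), right=Node(3, black=False))
assert black_height(root) == 2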
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(tbachman) Figure out a better/common place for this
AGENT_TYPE_DVS = 'DVS agent'
APIC_SYNC_NETWORK = 'apic-sync-network'
HOST_SNAT_NETWORK_PREFIX = 'host-snat-network-for-internal-use-'
HOST_SNAT_POOL = 'host-snat-pool-for-internal-use'
HOST_SNAT_POOL_PORT = 'host-snat-pool-port-for-internal-use'
DEVICE_OWNER_SNAT_PORT = 'host-snat-pool-port-device-owner-internal-use'
# TODO(tbachman) figure out a better/common place for this
VIF_TYPE_DVS = "dvs"
| [
"# Copyright (c) 2014 Cisco Systems Inc.\n",
"# All Rights Reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"# TODO(tbachman) Figure out a better/common place for this\n",
"AGENT_TYPE_DVS = 'DVS agent'\n",
"APIC_SYNC_NETWORK = 'apic-sync-network'\n",
"HOST_SNAT_NETWORK_PREFIX = 'host-snat-network-for-internal-use-'\n",
"HOST_SNAT_POOL = 'host-snat-pool-for-internal-use'\n",
"HOST_SNAT_POOL_PORT = 'host-snat-pool-port-for-internal-use'\n",
"DEVICE_OWNER_SNAT_PORT = 'host-snat-pool-port-device-owner-internal-use'\n",
"# TODO(tbachman) figure out a better/common place for this\n",
"VIF_TYPE_DVS = \"dvs\"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 24 | 0 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from polymorphic.models import PolymorphicModel
from autoslug import AutoSlugField
from rest_framework.response import Response
from rest_framework import status
from cms.common import mixins, utils
from cms.common.fields import LanguageField
from cms.content.models import Category, Article
from cms.content.serializers import CategorySerializer, ArticleSerializer
class Page(PolymorphicModel):
title = models.CharField(_('title'), max_length=255)
slug = AutoSlugField(
_('slug'), populate_from='title', unique_with='language',
editable=True, blank=True
)
published = models.BooleanField(_('published'), default=True)
order = models.PositiveSmallIntegerField(_('order'), default=0)
homepage = models.BooleanField(_('homepage'), default=False)
react_component_name = models.CharField(
_('react component name'), help_text=_('Leave it blank to use default '
'component.'),
max_length=255, null=True, blank=True
)
language = LanguageField(_('language'))
class Meta:
app_label = 'pages'
ordering = ('language', 'order', 'title')
verbose_name = _('page')
verbose_name_plural = _('pages')
def __str__(self):
return self.title
@property
def route(self):
if self.homepage:
return '/'
return '/{}'.format(self.slug)
@property
def all_routes(self):
if not self.pk:
return None
if self.homepage:
pathWithoutLang = '/'
else:
pathWithoutLang = '/{}'.format(self.slug)
routes = []
if self.language == settings.LANGUAGES[0][0]: # default language
routes.append(pathWithoutLang)
elif self.language == 'any': # any language
for lang in settings.LANGUAGES:
if lang[0] == settings.LANGUAGES[0][0]:
routes.append(pathWithoutLang)
else:
routes.append('/{}{}'.format(lang[0], pathWithoutLang))
else: # other than default language
routes.append('/{}{}'.format(self.language, pathWithoutLang))
return routes
@property
def deps_published(self):
return True
def get_meta(self):
return utils.generate_meta(
title=self.meta_title_override or self.title,
title_suffix=self.meta_title_site_name_suffix,
description=self.meta_description_override,
robots=self.meta_robots_override
)
def get_view(self, request):
if not self.deps_published:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response({
'component_name': (
self.react_component_name or self.DEFAULT_REACT_COMPONENT_NAME
),
'component_data': self.get_component_data(request),
'meta': self.get_meta()
})
class PageCategory(Page):
DEFAULT_REACT_COMPONENT_NAME = 'Category'
category = models.ForeignKey(
Category, verbose_name=_('category'), related_name='pages'
)
class Meta:
app_label = 'pages'
verbose_name = _('category page')
verbose_name_plural = _('category pages')
@property
def deps_published(self):
return self.category.published
def get_component_data(self, request):
return CategorySerializer(
self.category, context={'request': request}
).data
def get_meta(self):
return self.category.meta
class PageArticle(Page):
DEFAULT_REACT_COMPONENT_NAME = 'Article'
article = models.ForeignKey(
Article, verbose_name=_('article'), related_name='pages'
)
class Meta:
app_label = 'pages'
verbose_name = _('article page')
verbose_name_plural = _('article pages')
@property
def deps_published(self):
return self.article.published
def get_component_data(self, request):
return ArticleSerializer(
self.article, context={'request': request}
).data
def get_meta(self):
return self.article.meta
class PageCustomComponent(Page, mixins.Seo):
DEFAULT_REACT_COMPONENT_NAME = ''
class Meta:
app_label = 'pages'
verbose_name = _('custom component page')
verbose_name_plural = _('custom component pages')
def get_component_data(self, request):
return {}
| [
"from django.db import models\n",
"from django.utils.translation import ugettext_lazy as _\n",
"from django.conf import settings\n",
"\n",
"from polymorphic.models import PolymorphicModel\n",
"from autoslug import AutoSlugField\n",
"from rest_framework.response import Response\n",
"from rest_framework import status\n",
"\n",
"from cms.common import mixins, utils\n",
"from cms.common.fields import LanguageField\n",
"from cms.content.models import Category, Article\n",
"from cms.content.serializers import CategorySerializer, ArticleSerializer\n",
"\n",
"\n",
"class Page(PolymorphicModel):\n",
" title = models.CharField(_('title'), max_length=255)\n",
" slug = AutoSlugField(\n",
" _('slug'), populate_from='title', unique_with='language',\n",
" editable=True, blank=True\n",
" )\n",
" published = models.BooleanField(_('published'), default=True)\n",
" order = models.PositiveSmallIntegerField(_('order'), default=0)\n",
"\n",
" homepage = models.BooleanField(_('homepage'), default=False)\n",
"\n",
" react_component_name = models.CharField(\n",
" _('react component name'), help_text=_('Leave it blank to use default '\n",
" 'component.'),\n",
" max_length=255, null=True, blank=True\n",
" )\n",
"\n",
" language = LanguageField(_('language'))\n",
"\n",
" class Meta:\n",
" app_label = 'pages'\n",
" ordering = ('language', 'order', 'title')\n",
" verbose_name = _('page')\n",
" verbose_name_plural = _('pages')\n",
"\n",
" def __str__(self):\n",
" return self.title\n",
"\n",
" @property\n",
" def route(self):\n",
" if self.homepage:\n",
" return '/'\n",
" return '/{}'.format(self.slug)\n",
"\n",
" @property\n",
" def all_routes(self):\n",
" if not self.pk:\n",
" return None\n",
"\n",
" if self.homepage:\n",
" pathWithoutLang = '/'\n",
" else:\n",
" pathWithoutLang = '/{}'.format(self.slug)\n",
"\n",
" routes = []\n",
" if self.language == settings.LANGUAGES[0][0]: # default language\n",
" routes.append(pathWithoutLang)\n",
" elif self.language == 'any': # any language\n",
" for lang in settings.LANGUAGES:\n",
" if lang[0] == settings.LANGUAGES[0][0]:\n",
" routes.append(pathWithoutLang)\n",
" else:\n",
" routes.append('/{}{}'.format(lang[0], pathWithoutLang))\n",
"\n",
" else: # other than default language\n",
" routes.append('/{}{}'.format(self.language, pathWithoutLang))\n",
"\n",
" return routes\n",
"\n",
" @property\n",
" def deps_published(self):\n",
" return True\n",
"\n",
" def get_meta(self):\n",
" return utils.generate_meta(\n",
" title=self.meta_title_override or self.title,\n",
" title_suffix=self.meta_title_site_name_suffix,\n",
" description=self.meta_description_override,\n",
" robots=self.meta_robots_override\n",
" )\n",
"\n",
" def get_view(self, request):\n",
" if not self.deps_published:\n",
" return Response(status=status.HTTP_404_NOT_FOUND)\n",
" return Response({\n",
" 'component_name': (\n",
" self.react_component_name or self.DEFAULT_REACT_COMPONENT_NAME\n",
" ),\n",
" 'component_data': self.get_component_data(request),\n",
" 'meta': self.get_meta()\n",
" })\n",
"\n",
"\n",
"class PageCategory(Page):\n",
" DEFAULT_REACT_COMPONENT_NAME = 'Category'\n",
"\n",
" category = models.ForeignKey(\n",
" Category, verbose_name=_('category'), related_name='pages'\n",
" )\n",
"\n",
" class Meta:\n",
" app_label = 'pages'\n",
" verbose_name = _('category page')\n",
" verbose_name_plural = _('category pages')\n",
"\n",
" @property\n",
" def deps_published(self):\n",
" return self.category.published\n",
"\n",
" def get_component_data(self, request):\n",
" return CategorySerializer(\n",
" self.category, context={'request': request}\n",
" ).data\n",
"\n",
" def get_meta(self):\n",
" return self.category.meta\n",
"\n",
"\n",
"class PageArticle(Page):\n",
" DEFAULT_REACT_COMPONENT_NAME = 'Article'\n",
"\n",
" article = models.ForeignKey(\n",
" Article, verbose_name=_('article'), related_name='pages'\n",
" )\n",
"\n",
" class Meta:\n",
" app_label = 'pages'\n",
" verbose_name = _('article page')\n",
" verbose_name_plural = _('article pages')\n",
"\n",
" @property\n",
" def deps_published(self):\n",
" return self.article.published\n",
"\n",
" def get_component_data(self, request):\n",
" return ArticleSerializer(\n",
" self.article, context={'request': request}\n",
" ).data\n",
"\n",
" def get_meta(self):\n",
" return self.article.meta\n",
"\n",
"\n",
"class PageCustomComponent(Page, mixins.Seo):\n",
" DEFAULT_REACT_COMPONENT_NAME = ''\n",
"\n",
" class Meta:\n",
" app_label = 'pages'\n",
" verbose_name = _('custom component page')\n",
" verbose_name_plural = _('custom component pages')\n",
"\n",
" def get_component_data(self, request):\n",
" return {}\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 158 | 0 |
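For illustration, the language-prefix logic of all_routes above can be sketched outside Django as a plain function; the LANGUAGES list below only mimics the shape of settings.LANGUAGES, and its entries are invented for the example.

# Standalone sketch of the route-prefixing rules used by Page.all_routes.
LANGUAGES = [('en', 'English'), ('pl', 'Polish'), ('de', 'German')]  # example values

def all_routes(slug, language, homepage=False, languages=LANGUAGES):
    path = '/' if homepage else '/{}'.format(slug)
    default = languages[0][0]
    if language == default:            # default language gets the bare path
        return [path]
    if language == 'any':              # 'any' yields one route per configured language
        return [path if code == default else '/{}{}'.format(code, path)
                for code, _name in languages]
    return ['/{}{}'.format(language, path)]  # other languages get a prefix

print(all_routes('about', 'any'))      # ['/about', '/pl/about', '/de/about']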
"""
Copyright (c) 2012, Thomas M. Farrelly
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class META :
NAME = 'unamed'
HEADER = ''
DECL = ''
TYPES = []
def BUILD() :
result_h = ''
result_c = META.HEADER
result_h += ( ''.join( [
'\nstruct ' + c.name + '_struct ;' +
'\ntypedef struct ' + c.name + '_struct * ' + c.name + ' ;'
for c in T.classes ] ) )
result_h += META.DECL
result_h += ( ''.join( [
'\nTYPE ' + c.name + '_type ;'
for c in T.classes ] ) )
result_h += ( ''.join( [
'\nWID WI_' + stringtocid( w ) + ' ;'
for w in T.wids ] ) )
result_h += ( ''.join( [
'\ninline ' + c.name + ' new' + c.name + '( ' +
( ', '.join( [
a.t + ' ' + a.n
for a in c.attributes ] ) ) + ' ) ;' +
'\nstruct ' + c.name + '_struct {' +
'\n n_void (*objective)( TASK ) ;'
'\n n_string (*debug)( ANY ) ;' +
( ''.join( [
'\n ' + a.t + ' ' + a.n + ' ;'
for a in c.attributes ] ) ) + '\n} ;'
for c in T.classes ] ) )
result_c += ( ''.join( [
'\n#define THIS c(' + c.name +',task->action->value)' +
'\n#define THIS_R newREFERENCE( ' + c.name + '_type, task->context->this->value, any(THIS) )' +
'\n#define DO_TYPE_ID_TEST TYPE_RESPONSE(' + c.name + '_type )'
'\n#define PSTHIS ' + ( ( 'C(' + c.t1 + ',' ) if c.t1 else 'any(' ) + 'task->context->this->svalue)' +
'\n#define PSTHAT ' + ( ( 'C(' + c.t2 + ',' ) if c.t2 else 'any(' ) + 'task->context->that->svalue)' +
'\nn_void ' + c.name + '_objective( TASK task ) {' + c.objective + '}' +
'\n#undef PSTHAT' +
'\n#undef PSTHIS' +
'\n#undef DO_TYPE_ID_TEST' +
'\n#undef THIS_R' +
'\n#undef THIS' +
'\nn_string debug' + c.name + '( ANY o ) {' +
'\n char * s ;' +
'\n asprintf( &s, "[%04zx:%s' + c.debug.f + ']", c( size_t, o ) >> 4 & 0xfff, "' + c.name + '"' +
( ''.join( [
', ' + d
for d in c.debug.d ] ) ) + ' ) ;' +
'\n return s ;' +
'\n}' +
(
'\ninline ' + c.name + ' new' + c.name + '( ' +
( ', '.join( [
a.t + ' ' + a.n
for a in c.attributes ] ) ) + ' ) {' +
'\n ' + c.name + ' new_object = ALLOCATE( struct ' + c.name + '_struct ) ;' +
'\n new_object->objective = ' + c.name + '_objective ;' +
'\n new_object->debug = debug' + c.name + ' ;' +
( ''.join( [
'\n new_object->' + a.n + ' = ' + a.n + ' ;'
for a in c.attributes ] ) ) +
'\n return new_object ;' +
'\n}'
)
for c in T.classes ] ) + '' )
result_h += '\nn_void INITIALIZE_' + META.NAME + '_TYPES() ;'
result_c += ( '\nn_void INITIALIZE_' + META.NAME + '_TYPES() {' + ( ''.join( [
'\n ' + c.name + '_type = newTYPE( newTID(), ' + c.name + '_objective, any(NONE), any(NONE) ) ; '
for c in T.classes ] ) ) + '\n}' )
result_h += '\nn_void INITIALIZE_' + META.NAME + '_WIDS() ;'
result_c += ( '\nn_void INITIALIZE_' + META.NAME + '_WIDS() {' +
'\n WIDS = listNEW ; ' + ( ''.join( [
'\n WI_' + stringtocid( w ) + ' = widNEW( "' + w + '" ) ; '
for w in T.wids ] ) ) + '\n}' )
result_h += '\n\n'
result_c += '\n\n'
open( META.NAME + '.h', 'w' ).write( result_h ) ;
open( META.NAME + '.c', 'w' ).write( result_c ) ;
class D:
def __init__( self, f = '', *d ) :
self.f = f
self.d = d
class A:
def __init__( self, t, n ) :
self.t = t
self.n = n
class T:
classes = []
wids = []
def __init__( self, name, t1 = None, t2 = None, attributes = (), objective = "", debug = D() ) :
self.name = name
self.t1 = t1
self.t2 = t2
self.attributes = attributes
self.objective = objective
self.debug = debug
T.classes.append( self )
def stringtocid( s ) :
for a, b in {
'.':'DOT', '!':'EXCLAIM',':':'COLON',
'+':'ADD', '-':'SUB', '*':'MUL', '/':'DIV',
'<':'LT', '=':'EQ', '>':'GT', '%':'PERC'
}.iteritems() :
s = s.replace( a, b )
return s
def W( *wids ) :
T.wids = wids
class X:
def __init__( self, n, c ) :
self.n = n
self.c = c
def P( name, *parameters ) :
T( 'PARAM' + name + '_assort', None, None, (), """
REFERENCE tuple = refNEW( TUPLE_type, any(NONE) ) ;
REFERENCE tuple_ref = ref(tuple) ;
task->next = newTASK( ref(newPARAM%(name)s_0( tuple )), task->context, task->result, task->next, task->exit ) ;
CONTEXT c0 = newCONTEXT( task->context->closure, tuple_ref, ref(newNOUN( task->context->that )) ) ;
task->next = newTASK( tuple_ref, c0, ref(NONE), task->next, task->next ) ;
""" % { 'name':name } )
T( 'PARAM' + name + '_0', None, None, (
A( 'REFERENCE', 'tuple' ),
), """
// OUT( assort 0 ) ;
// %(dbg)s
if ( NOTNONE( THIS->tuple->value ) ) {
if ( C(TUPLE,THIS->tuple->value)->list->length == %(len)s ) {
// OUT( assort 0.1 ) ;
%(decl)s
task->next = newTASK( ref(newPARAM%(name)s_1( %(attr)s )), task->context, task->result, task->next, task->exit ) ;
%(check)s
}
}
""" % { 'name':name, 'len':len(parameters),
'dbg': ( ''.join( [ 'LOG( C(TUPLE,THIS->tuple->value)->list->data[%s]->value ) ;' % str( i ) for i in range( len(parameters) ) ] ) ),
'attr': ( ', '.join( [ p.n + '_ref' for p in parameters ] ) ),
'decl': ( ''.join( [ """
REFERENCE %(name)s_ref = refNEW( %(cls)s_type, any(NONE) ) ;
REFERENCE %(name)s_ref_ref = ref(%(name)s_ref) ;
""" % { 'name':p.n, 'cls':p.c } for p in parameters ] ) ),
'check': ( ''.join( [ """
// LOG( %(name)s_ref_ref ) ;
CONTEXT c%(name)s = newCONTEXT( task->context->closure, %(name)s_ref_ref, ref(newNOUN( C(TUPLE,THIS->tuple->value)->list->data[%(i)s] )) ) ;
task->next = newTASK( %(name)s_ref_ref, c%(name)s, ref(NONE), task->next, task->next ) ;
""" % { 'i':i, 'name':parameters[i].n } for i in range( len(parameters) ) ] ) )
} )
T( 'PARAM' + name + '_1', None, None, [
A( 'REFERENCE', p.n + '_ref' ) for p in parameters
], """
// OUT( assort 1 ) ;
// %(dbg)s
if ( %(test)s ) {
// OUT( assort 1.1 ) ;
RETURN( newPARAM%(name)s_struct( %(pass)s ) ) ;
}
""" % { 'name':name,
'dbg': ( ''.join( [ 'LOG( THIS->%s_ref->value ) ;' % p.n for p in parameters ] ) ),
'test': ( ' && '.join( [ 'NOTNONE( THIS->%s_ref->value )' % p.n for p in parameters ] ) ),
'pass': ( ', '.join( [ 'THIS->%s_ref' % p.n for p in parameters ] ) )
} )
T( 'PARAM' + name + '_struct', None, None, [
A( 'REFERENCE', p.n + '_ref' ) for p in parameters
], """
DO_TYPE ;
%(attr)s
""" % { 'attr': ( ''.join( [ """
ONWID( WI_%(p)s, THIS->%(p)s_ref->value ) ;
""" % { 'p':p.n } for p in parameters ] ) ) } )
| [
"\"\"\"\n",
"Copyright (c) 2012, Thomas M. Farrelly\n",
"\n",
"Permission is hereby granted, free of charge, to any person obtaining a copy of\n",
"this software and associated documentation files (the \"Software\"), to deal in\n",
"the Software without restriction, including without limitation the rights to\n",
"use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n",
"the Software, and to permit persons to whom the Software is furnished to do so,\n",
"subject to the following conditions:\n",
"\n",
"The above copyright notice and this permission notice shall be included in all\n",
"copies or substantial portions of the Software.\n",
"\n",
"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n",
"IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n",
"FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n",
"COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n",
"IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n",
"CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n",
"\"\"\"\n",
"\n",
"class META :\n",
" NAME = 'unamed'\n",
" HEADER = ''\n",
" DECL = ''\n",
" TYPES = []\n",
"\n",
"def BUILD() :\n",
" result_h = ''\n",
" result_c = META.HEADER\n",
"\n",
" result_h += ( ''.join( [\n",
" '\\nstruct ' + c.name + '_struct ;' +\n",
" '\\ntypedef struct ' + c.name + '_struct * ' + c.name + ' ;'\n",
" for c in T.classes ] ) )\n",
"\n",
" result_h += META.DECL\n",
"\n",
" result_h += ( ''.join( [\n",
" '\\nTYPE ' + c.name + '_type ;'\n",
" for c in T.classes ] ) )\n",
"\n",
" result_h += ( ''.join( [\n",
" '\\nWID WI_' + stringtocid( w ) + ' ;'\n",
" for w in T.wids ] ) )\n",
"\n",
" result_h += ( ''.join( [\n",
" '\\ninline ' + c.name + ' new' + c.name + '( ' +\n",
" ( ', '.join( [\n",
" a.t + ' ' + a.n\n",
" for a in c.attributes ] ) ) + ' ) ;' +\n",
" '\\nstruct ' + c.name + '_struct {' +\n",
" '\\n n_void (*objective)( TASK ) ;'\n",
" '\\n n_string (*debug)( ANY ) ;' +\n",
" ( ''.join( [\n",
" '\\n ' + a.t + ' ' + a.n + ' ;'\n",
" for a in c.attributes ] ) ) + '\\n} ;'\n",
" for c in T.classes ] ) )\n",
"\n",
" result_c += ( ''.join( [\n",
" '\\n#define THIS c(' + c.name +',task->action->value)' +\n",
" '\\n#define THIS_R newREFERENCE( ' + c.name + '_type, task->context->this->value, any(THIS) )' +\n",
" '\\n#define DO_TYPE_ID_TEST TYPE_RESPONSE(' + c.name + '_type )'\n",
" '\\n#define PSTHIS ' + ( ( 'C(' + c.t1 + ',' ) if c.t1 else 'any(' ) + 'task->context->this->svalue)' +\n",
" '\\n#define PSTHAT ' + ( ( 'C(' + c.t2 + ',' ) if c.t2 else 'any(' ) + 'task->context->that->svalue)' +\n",
" '\\nn_void ' + c.name + '_objective( TASK task ) {' + c.objective + '}' +\n",
" '\\n#undef PSTHAT' +\n",
" '\\n#undef PSTHIS' +\n",
" '\\n#undef DO_TYPE_ID_TEST' +\n",
" '\\n#undef THIS_R' +\n",
" '\\n#undef THIS' +\n",
" '\\nn_string debug' + c.name + '( ANY o ) {' +\n",
" '\\n char * s ;' +\n",
" '\\n asprintf( &s, \"[%04zx:%s' + c.debug.f + ']\", c( size_t, o ) >> 4 & 0xfff, \"' + c.name + '\"' +\n",
" ( ''.join( [\n",
" ', ' + d\n",
" for d in c.debug.d ] ) ) + ' ) ;' +\n",
" '\\n return s ;' +\n",
" '\\n}' +\n",
" (\n",
" '\\ninline ' + c.name + ' new' + c.name + '( ' +\n",
" ( ', '.join( [\n",
" a.t + ' ' + a.n\n",
" for a in c.attributes ] ) ) + ' ) {' +\n",
" '\\n ' + c.name + ' new_object = ALLOCATE( struct ' + c.name + '_struct ) ;' +\n",
" '\\n new_object->objective = ' + c.name + '_objective ;' +\n",
" '\\n new_object->debug = debug' + c.name + ' ;' +\n",
" ( ''.join( [\n",
" '\\n new_object->' + a.n + ' = ' + a.n + ' ;'\n",
" for a in c.attributes ] ) ) +\n",
" '\\n return new_object ;' +\n",
" '\\n}'\n",
" )\n",
" for c in T.classes ] ) + '' )\n",
"\n",
" result_h += '\\nn_void INITIALIZE_' + META.NAME + '_TYPES() ;'\n",
" result_c += ( '\\nn_void INITIALIZE_' + META.NAME + '_TYPES() {' + ( ''.join( [\n",
" '\\n ' + c.name + '_type = newTYPE( newTID(), ' + c.name + '_objective, any(NONE), any(NONE) ) ; '\n",
" for c in T.classes ] ) ) + '\\n}' )\n",
"\n",
" result_h += '\\nn_void INITIALIZE_' + META.NAME + '_WIDS() ;'\n",
" result_c += ( '\\nn_void INITIALIZE_' + META.NAME + '_WIDS() {' +\n",
" '\\n WIDS = listNEW ; ' + ( ''.join( [\n",
" '\\n WI_' + stringtocid( w ) + ' = widNEW( \"' + w + '\" ) ; '\n",
" for w in T.wids ] ) ) + '\\n}' )\n",
"\n",
" result_h += '\\n\\n'\n",
" result_c += '\\n\\n'\n",
"\n",
" open( META.NAME + '.h', 'w' ).write( result_h ) ;\n",
" open( META.NAME + '.c', 'w' ).write( result_c ) ;\n",
"\n",
"class D:\n",
" def __init__( self, f = '', *d ) :\n",
" self.f = f\n",
" self.d = d\n",
"\n",
"class A:\n",
" def __init__( self, t, n ) :\n",
" self.t = t\n",
" self.n = n\n",
"\n",
"class T:\n",
" classes = []\n",
" wids = []\n",
" def __init__( self, name, t1 = None, t2 = None, attributes = (), objective = \"\", debug = D() ) :\n",
" self.name = name\n",
" self.t1 = t1\n",
" self.t2 = t2\n",
" self.attributes = attributes\n",
" self.objective = objective\n",
" self.debug = debug\n",
" T.classes.append( self )\n",
"\n",
"def stringtocid( s ) :\n",
" for a, b in {\n",
" '.':'DOT', '!':'EXCLAIM',':':'COLON',\n",
" '+':'ADD', '-':'SUB', '*':'MUL', '/':'DIV',\n",
" '<':'LT', '=':'EQ', '>':'GT', '%':'PERC'\n",
" }.iteritems() :\n",
" s = s.replace( a, b )\n",
" return s\n",
"\n",
"def W( *wids ) :\n",
" T.wids = wids\n",
"\n",
"class X:\n",
" def __init__( self, n, c ) :\n",
" self.n = n\n",
" self.c = c\n",
" \n",
"\n",
"def P( name, *parameters ) :\n",
"\n",
" T( 'PARAM' + name + '_assort', None, None, (), \"\"\"\n",
" REFERENCE tuple = refNEW( TUPLE_type, any(NONE) ) ;\n",
" REFERENCE tuple_ref = ref(tuple) ;\n",
" task->next = newTASK( ref(newPARAM%(name)s_0( tuple )), task->context, task->result, task->next, task->exit ) ;\n",
" CONTEXT c0 = newCONTEXT( task->context->closure, tuple_ref, ref(newNOUN( task->context->that )) ) ;\n",
" task->next = newTASK( tuple_ref, c0, ref(NONE), task->next, task->next ) ;\n",
" \"\"\" % { 'name':name } )\n",
"\n",
" T( 'PARAM' + name + '_0', None, None, (\n",
" A( 'REFERENCE', 'tuple' ),\n",
" ), \"\"\"\n",
"// OUT( assort 0 ) ;\n",
"// %(dbg)s\n",
" if ( NOTNONE( THIS->tuple->value ) ) {\n",
" if ( C(TUPLE,THIS->tuple->value)->list->length == %(len)s ) {\n",
"// OUT( assort 0.1 ) ;\n",
" %(decl)s\n",
" task->next = newTASK( ref(newPARAM%(name)s_1( %(attr)s )), task->context, task->result, task->next, task->exit ) ;\n",
" %(check)s\n",
" }\n",
" }\n",
" \"\"\" % { 'name':name, 'len':len(parameters),\n",
" 'dbg': ( ''.join( [ 'LOG( C(TUPLE,THIS->tuple->value)->list->data[%s]->value ) ;' % str( i ) for i in range( len(parameters) ) ] ) ),\n",
" 'attr': ( ', '.join( [ p.n + '_ref' for p in parameters ] ) ),\n",
" 'decl': ( ''.join( [ \"\"\"\n",
" REFERENCE %(name)s_ref = refNEW( %(cls)s_type, any(NONE) ) ;\n",
" REFERENCE %(name)s_ref_ref = ref(%(name)s_ref) ;\n",
" \"\"\" % { 'name':p.n, 'cls':p.c } for p in parameters ] ) ),\n",
" 'check': ( ''.join( [ \"\"\"\n",
"// LOG( %(name)s_ref_ref ) ;\n",
" CONTEXT c%(name)s = newCONTEXT( task->context->closure, %(name)s_ref_ref, ref(newNOUN( C(TUPLE,THIS->tuple->value)->list->data[%(i)s] )) ) ;\n",
" task->next = newTASK( %(name)s_ref_ref, c%(name)s, ref(NONE), task->next, task->next ) ;\n",
" \"\"\" % { 'i':i, 'name':parameters[i].n } for i in range( len(parameters) ) ] ) )\n",
" } )\n",
"\n",
" T( 'PARAM' + name + '_1', None, None, [\n",
" A( 'REFERENCE', p.n + '_ref' ) for p in parameters\n",
" ], \"\"\"\n",
"// OUT( assort 1 ) ;\n",
"// %(dbg)s\n",
" if ( %(test)s ) {\n",
"// OUT( assort 1.1 ) ;\n",
" RETURN( newPARAM%(name)s_struct( %(pass)s ) ) ;\n",
" }\n",
" \"\"\" % { 'name':name,\n",
" 'dbg': ( ''.join( [ 'LOG( THIS->%s_ref->value ) ;' % p.n for p in parameters ] ) ),\n",
" 'test': ( ' && '.join( [ 'NOTNONE( THIS->%s_ref->value )' % p.n for p in parameters ] ) ),\n",
" 'pass': ( ', '.join( [ 'THIS->%s_ref' % p.n for p in parameters ] ) )\n",
" } )\n",
"\n",
" T( 'PARAM' + name + '_struct', None, None, [\n",
" A( 'REFERENCE', p.n + '_ref' ) for p in parameters\n",
" ], \"\"\"\n",
" DO_TYPE ;\n",
" %(attr)s\n",
" \"\"\" % { 'attr': ( ''.join( [ \"\"\"\n",
" ONWID( WI_%(p)s, THIS->%(p)s_ref->value ) ;\n",
" \"\"\" % { 'p':p.n } for p in parameters ] ) ) } )\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0.15384615384615385,
0.05555555555555555,
0.07142857142857142,
0.08333333333333333,
0.07692307692307693,
0,
0.14285714285714285,
0.0625,
0.04,
0,
0.1111111111111111,
0,
0,
0.14814814814814814,
0,
0.041666666666666664,
0,
0.1111111111111111,
0,
0.14814814814814814,
0,
0.1111111111111111,
0.047619047619047616,
0.16666666666666666,
0,
0.1111111111111111,
0,
0.10526315789473684,
0,
0.09302325581395349,
0,
0,
0,
0.11764705882352941,
0,
0.09523809523809523,
0.14814814814814814,
0,
0.1111111111111111,
0.016666666666666666,
0.01,
0,
0.04672897196261682,
0.04672897196261682,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0.11764705882352941,
0,
0.1,
0,
0,
0,
0,
0.09523809523809523,
0,
0.08888888888888889,
0.011764705882352941,
0,
0,
0.10526315789473684,
0,
0.1111111111111111,
0,
0,
0,
0.125,
0,
0.015625,
0.06172839506172839,
0.009708737864077669,
0.13513513513513514,
0,
0.015873015873015872,
0.029850746268656716,
0.06976744186046512,
0.046153846153846156,
0.14705882352941177,
0,
0.047619047619047616,
0.047619047619047616,
0,
0.1346153846153846,
0.1346153846153846,
0,
0.1111111111111111,
0.16216216216216217,
0,
0,
0,
0.1111111111111111,
0.12903225806451613,
0,
0,
0,
0.1111111111111111,
0.06666666666666667,
0.08333333333333333,
0.16161616161616163,
0,
0,
0,
0,
0,
0,
0.06896551724137931,
0,
0.17391304347826086,
0.0625,
0.09523809523809523,
0.08333333333333333,
0.08888888888888889,
0.05555555555555555,
0.07692307692307693,
0.09090909090909091,
0,
0.23529411764705882,
0.0625,
0,
0.1111111111111111,
0.12903225806451613,
0,
0,
0.3333333333333333,
0,
0.10344827586206896,
0,
0.03773584905660377,
0,
0,
0.008620689655172414,
0.009615384615384616,
0,
0.15384615384615385,
0,
0.047619047619047616,
0.06451612903225806,
0,
0,
0,
0,
0,
0,
0,
0.008130081300813009,
0,
0,
0,
0.06521739130434782,
0.08695652173913043,
0.1044776119402985,
0.13793103448275862,
0,
0,
0.1076923076923077,
0.1,
0,
0.006711409395973154,
0.010309278350515464,
0.11904761904761904,
0.16666666666666666,
0,
0.047619047619047616,
0.03636363636363636,
0,
0,
0,
0,
0,
0,
0,
0.08695652173913043,
0.09090909090909091,
0.08421052631578947,
0.0945945945945946,
0.3333333333333333,
0,
0.0425531914893617,
0.03636363636363636,
0,
0,
0,
0.11428571428571428,
0,
0.16,
0,
0,
0,
0,
0,
0,
0,
1
] | 220 | 0.04649 |
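As an aside, the join-based code generation in BUILD() above amounts to turning class descriptions into C source through string concatenation; a minimal sketch of that pattern, with made-up type descriptions, might look like this.

# Illustrative sketch only: generate C struct declarations from (name, fields) tuples.
classes = [
    ('POINT', [('int', 'x'), ('int', 'y')]),
    ('SPAN',  [('size_t', 'start'), ('size_t', 'length')]),
]

def gen_structs(classes):
    parts = []
    for name, fields in classes:
        body = ''.join('\n  {} {} ;'.format(t, n) for t, n in fields)
        parts.append('\nstruct {}_struct {{{}\n}} ;'.format(name, body))
    return ''.join(parts)

print(gen_structs(classes))  # prints two struct declarations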
import numpy
from functools import partial
moment_types = {
'tnra20m_nt': 'tab-new-recently-active20m-no-tab',
'tnra10s': 'tab-new-recently-active10s',
'window_open': 'window-open',
'athp': 'active-tab-hostname-progress',
'startup': 'startup'
}
def m_rates(m_short, up):
rates = list(filter(lambda x: not isinstance(x, str), up.log_set.type('MOMENT_REPORT') \
.filter(lambda x: x['attrs']['moment'] == moment_types[m_short]) \
.last()['attrs']['rates']))
return rates
def m_count(m_short, up):
return len(m_rates(m_short, up))
def m_mean(m_short, up):
rates = m_rates(m_short, up)
if len(rates) == 0 : return None
return numpy.median(numpy.array(rates))
exports = {k:v for m_short in moment_types for k,v in {
'n_%s' % m_short: partial(m_count, m_short),
'med_%s' % m_short: partial(m_mean, m_short)
}.items()
}
# equivalent to
# n_tnra20m_nt = partial(m_count, 'tnra20m_nt')
# med_tnra20m_nt = partial(m_mean, 'tnra20m_nt')
# n_tnra10s = partial(m_count, 'tnra10s')
# med_tnra10s = partial(m_mean, 'tnra10s')
# n_window_open = partial(m_count, 'window_open')
# med_window_open = partial(m_mean, 'window_open')
# n_athp = partial(m_count, 'athp')
# med_athp = partial(m_mean, 'athp')
# n_startup = partial(m_count, 'startup')
# med_startup = partial(m_mean, 'startup')
| [
"import numpy\n",
"from functools import partial\n",
"\n",
"moment_types = {\n",
" 'tnra20m_nt': 'tab-new-recently-active20m-no-tab',\n",
" 'tnra10s': 'tab-new-recently-active10s',\n",
" 'window_open': 'window-open',\n",
" 'athp': 'active-tab-hostname-progress',\n",
" 'startup': 'startup'\n",
" }\n",
"\n",
"def m_rates(m_short, up):\n",
" rates = list(filter(lambda x: not isinstance(x, str), up.log_set.type('MOMENT_REPORT') \\\n",
" .filter(lambda x: x['attrs']['moment'] == moment_types[m_short]) \\\n",
" .last()['attrs']['rates']))\n",
"\n",
" return rates\n",
"\n",
"def m_count(m_short, up):\n",
" return len(m_rates(m_short, up))\n",
"\n",
"def m_mean(m_short, up):\n",
" rates = m_rates(m_short, up)\n",
"\n",
" if len(rates) == 0 : return None\n",
"\n",
" return numpy.median(numpy.array(rates))\n",
"\n",
"exports = {k:v for m_short in moment_types for k,v in { \n",
" 'n_%s' % m_short: partial(m_count, m_short),\n",
" 'med_%s' % m_short: partial(m_mean, m_short)\n",
" }.items()\n",
" }\n",
"\n",
"# equivalent to \n",
"\n",
"# n_tnra20m_nt = partial(m_count, 'tnra20m_nt')\n",
"# med_tnra20m_nt = partial(m_mean, 'tnra20m_nt')\n",
"\n",
"# n_tnra10s = partial(m_count, 'tnra10s')\n",
"# med_tnra10s = partial(m_mean, 'tnra10s')\n",
"\n",
"# n_window_open = partial(m_count, 'window_open')\n",
"# med_window_open = partial(m_mean, 'window_open')\n",
"\n",
"# n_athp = partial(m_count, 'athp')\n",
"# med_athp = partial(m_mean, 'athp')\n",
"\n",
"# n_startup = partial(m_count, 'startup')\n",
"# med_startup = partial(m_mean, 'startup')\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0.021505376344086023,
0.02666666666666667,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0.04,
0,
0,
0.05405405405405406,
0,
0,
0,
0.08620689655172414,
0,
0,
0,
0.07142857142857142,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1
] | 51 | 0.028149 |
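For illustration, the functools.partial export pattern above (one count and one median accessor per moment type) can be demonstrated with a self-contained snippet; the moment names and rate table below are invented example data, not values from the original module.

from functools import partial
import statistics

rates_by_moment = {'startup': [0.1, 0.4, 0.2], 'window_open': []}  # example data

def m_count(m_short, data):
    return len(data.get(m_short, []))

def m_median(m_short, data):
    vals = data.get(m_short, [])
    return statistics.median(vals) if vals else None

# Build one exported callable per (metric, moment) pair, as in the module above.
exports = {k: v for m_short in rates_by_moment for k, v in {
    'n_%s' % m_short: partial(m_count, m_short),
    'med_%s' % m_short: partial(m_median, m_short),
}.items()}

print(exports['n_startup'](rates_by_moment))        # 3
print(exports['med_startup'](rates_by_moment))      # 0.2
print(exports['med_window_open'](rates_by_moment))  # None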
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# A copy of the GNU General Public License is available at
# http://www.gnu.org/licenses/gpl-3.0.html
from __future__ import print_function
import os
import sys
import argparse
__author__ = "Amine Ghozlane"
__copyright__ = "Copyright 2015, Institut Pasteur"
__credits__ = ["Amine Ghozlane"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Amine Ghozlane"
__email__ = "[email protected]"
__status__ = "Developpement"
def isfile(path):
"""Check if path is an existing file.
Arguments:
path: Path to the file
"""
if not os.path.isfile(path):
if os.path.isdir(path):
msg = "{0} is a directory".format(path)
else:
msg = "{0} does not exist.".format(path)
raise argparse.ArgumentTypeError(msg)
return path
def getArguments():
"""Retrieves the arguments of the program.
Returns: An object that contains the arguments
"""
# Parsing arguments
parser = argparse.ArgumentParser(description=__doc__, usage=
"{0} -h".format(sys.argv[0]))
parser.add_argument('-i', dest='fasta_file', type=isfile, required=True,
help='Path to the fasta file.')
parser.add_argument('-n', dest='name', type=str, default="OTU_",
help='Relabel name (default= OTU_).')
parser.add_argument('-o', dest='output_file', type=str, default=None,
help='Output file.')
args = parser.parse_args()
return args
def fill(text, width=80):
"""Split text"""
return os.linesep.join(text[i:i+width] for i in xrange(0, len(text), width))
def rename_otu(fasta_file, name, output_file):
"""Add new label and rewrite text
"""
count = 1
if not output_file:
output = sys.stdout
else:
output = open(output_file, "wt")
header = ""
sequence = ""
try:
with open(fasta_file, "rt") as fast:
for line in fast:
if line.startswith(">"):
if len(header) > 0:
print(">{0}{1}{2}{3}".format(name, count, os.linesep,
fill(sequence)),
file=output)
sequence = ""
count +=1
header = line
else:
sequence += line.replace("\n", "").replace("\r", "")
print(">{0}{1}{2}{3}".format(name, count, os.linesep,
fill(sequence)),
file=output)
except IOError:
sys.exit("Error cannot open {0}".format(fasta_file))
if output_file:
output.close()
def main():
"""Main program
"""
args = getArguments()
rename_otu(args.fasta_file, args.name, args.output_file)
if __name__ == '__main__':
main() | [
"#!/usr/bin/env python\n",
"# -*- coding: utf-8 -*-\n",
"# This program is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"# This program is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"# A copy of the GNU General Public License is available at\n",
"# http://www.gnu.org/licenses/gpl-3.0.html\n",
"from __future__ import print_function\n",
"import os\n",
"import sys\n",
"import argparse\n",
"\n",
"__author__ = \"Amine Ghozlane\"\n",
"__copyright__ = \"Copyright 2015, Institut Pasteur\"\n",
"__credits__ = [\"Amine Ghozlane\"]\n",
"__license__ = \"GPL\"\n",
"__version__ = \"1.0.0\"\n",
"__maintainer__ = \"Amine Ghozlane\"\n",
"__email__ = \"[email protected]\"\n",
"__status__ = \"Developpement\"\n",
"\n",
"\n",
"def isfile(path):\n",
" \"\"\"Check if path is an existing file.\n",
" Arguments:\n",
" path: Path to the file\n",
" \"\"\"\n",
" if not os.path.isfile(path):\n",
" if os.path.isdir(path):\n",
" msg = \"{0} is a directory\".format(path)\n",
" else:\n",
" msg = \"{0} does not exist.\".format(path)\n",
" raise argparse.ArgumentTypeError(msg)\n",
" return path\n",
"\n",
"\n",
"def getArguments():\n",
" \"\"\"Retrieves the arguments of the program.\n",
" Returns: An object that contains the arguments\n",
" \"\"\"\n",
" # Parsing arguments\n",
" parser = argparse.ArgumentParser(description=__doc__, usage=\n",
" \"{0} -h\".format(sys.argv[0]))\n",
" parser.add_argument('-i', dest='fasta_file', type=isfile, required=True,\n",
" help='Path to the fasta file.')\n",
" parser.add_argument('-n', dest='name', type=str, default=\"OTU_\",\n",
" help='Relabel name (default= OTU_).')\n",
" parser.add_argument('-o', dest='output_file', type=str, default=None,\n",
" help='Output file.')\n",
" args = parser.parse_args()\n",
" return args\n",
"\n",
"\n",
"def fill(text, width=80):\n",
" \"\"\"Split text\"\"\"\n",
" return os.linesep.join(text[i:i+width] for i in xrange(0, len(text), width))\n",
"\n",
"\n",
"def rename_otu(fasta_file, name, output_file):\n",
" \"\"\"Add new label and rewrite text\n",
" \"\"\"\n",
" count = 1\n",
" if not output_file:\n",
" output = sys.stdout\n",
" else:\n",
" output = open(output_file, \"wt\")\n",
" header = \"\"\n",
" sequence = \"\"\n",
" try:\n",
" with open(fasta_file, \"rt\") as fast:\n",
" for line in fast:\n",
" if line.startswith(\">\"):\n",
" if len(header) > 0:\n",
" print(\">{0}{1}{2}{3}\".format(name, count, os.linesep,\n",
" fill(sequence)),\n",
" file=output)\n",
" sequence = \"\"\n",
" count +=1\n",
" header = line\n",
" else:\n",
" sequence += line.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n",
" print(\">{0}{1}{2}{3}\".format(name, count, os.linesep,\n",
" fill(sequence)),\n",
" file=output)\n",
" except IOError:\n",
" sys.exit(\"Error cannot open {0}\".format(fasta_file))\n",
" if output_file:\n",
" output.close()\n",
"\n",
"\n",
"def main():\n",
" \"\"\"Main program\n",
" \"\"\"\n",
" args = getArguments()\n",
" rename_otu(args.fasta_file, args.name, args.output_file)\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" main()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015384615384615385,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1
] | 104 | 0.001648 |
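As a side note, the fill() helper above relies on Python 2's xrange; a Python 3 sketch of the same fixed-width wrapping for FASTA output, using an arbitrary example sequence, looks like this.

import os

def fill(text, width=80):
    """Split text into fixed-width chunks joined by the platform line separator."""
    return os.linesep.join(text[i:i + width] for i in range(0, len(text), width))

seq = 'ACGT' * 25            # 100 bases of example sequence
print('>OTU_1')              # relabeled header, as rename_otu would emit
print(fill(seq, width=60))   # sequence wrapped at 60 characters per line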
from __future__ import division
import math
import random
import sys
import time
from twisted.internet import defer, protocol, reactor
from twisted.python import failure, log
import p2pool
from p2pool import data as p2pool_data
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral, p2protocol, pack, variable
class PeerMisbehavingError(Exception):
pass
def fragment(f, **kwargs):
try:
f(**kwargs)
except p2protocol.TooLong:
fragment(f, **dict((k, v[:len(v)//2]) for k, v in kwargs.iteritems()))
fragment(f, **dict((k, v[len(v)//2:]) for k, v in kwargs.iteritems()))
class Protocol(p2protocol.Protocol):
VERSION = 1300
max_remembered_txs_size = 2500000
def __init__(self, node, incoming):
p2protocol.Protocol.__init__(self, node.net.PREFIX, 1000000, node.traffic_happened)
self.node = node
self.incoming = incoming
self.other_version = None
self.connected2 = False
def connectionMade(self):
self.factory.proto_made_connection(self)
self.connection_lost_event = variable.Event()
self.addr = self.transport.getPeer().host, self.transport.getPeer().port
self.send_version(
version=self.VERSION,
services=0,
addr_to=dict(
services=0,
address=self.transport.getPeer().host,
port=self.transport.getPeer().port,
),
addr_from=dict(
services=0,
address=self.transport.getHost().host,
port=self.transport.getHost().port,
),
nonce=self.node.nonce,
sub_version=p2pool.__version__,
mode=1,
best_share_hash=self.node.best_share_hash_func(),
)
self.timeout_delayed = reactor.callLater(10, self._connect_timeout)
self.get_shares = deferral.GenericDeferrer(
max_id=2**256,
func=lambda id, hashes, parents, stops: self.send_sharereq(id=id, hashes=hashes, parents=parents, stops=stops),
timeout=15,
on_timeout=self.disconnect,
)
self.remote_tx_hashes = set() # view of peer's known_txs # not actually initially empty, but sending txs instead of tx hashes won't hurt
self.remote_remembered_txs_size = 0
self.remembered_txs = {} # view of peer's mining_txs
self.remembered_txs_size = 0
self.known_txs_cache = {}
def _connect_timeout(self):
self.timeout_delayed = None
print 'Handshake timed out, disconnecting from %s:%i' % self.addr
self.disconnect()
def packetReceived(self, command, payload2):
try:
if command != 'version' and not self.connected2:
raise PeerMisbehavingError('first message was not version message')
p2protocol.Protocol.packetReceived(self, command, payload2)
except PeerMisbehavingError, e:
print 'Peer %s:%i misbehaving, will drop and ban. Reason:' % self.addr, e.message
self.badPeerHappened()
def badPeerHappened(self):
print "Bad peer banned:", self.addr
self.disconnect()
if self.transport.getPeer().host != '127.0.0.1': # never ban localhost
self.node.bans[self.transport.getPeer().host] = time.time() + 60*60
def _timeout(self):
self.timeout_delayed = None
print 'Connection timed out, disconnecting from %s:%i' % self.addr
self.disconnect()
message_version = pack.ComposedType([
('version', pack.IntType(32)),
('services', pack.IntType(64)),
('addr_to', bitcoin_data.address_type),
('addr_from', bitcoin_data.address_type),
('nonce', pack.IntType(64)),
('sub_version', pack.VarStrType()),
('mode', pack.IntType(32)), # always 1 for legacy compatibility
('best_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
])
def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
if self.other_version is not None:
raise PeerMisbehavingError('more than one version message')
if version < 1300:
raise PeerMisbehavingError('peer too old')
self.other_version = version
self.other_sub_version = sub_version[:512]
self.other_services = services
if nonce == self.node.nonce:
raise PeerMisbehavingError('was connected to self')
if nonce in self.node.peers:
if p2pool.DEBUG:
print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
self.disconnect()
return
self.nonce = nonce
self.connected2 = True
self.timeout_delayed.cancel()
self.timeout_delayed = reactor.callLater(100, self._timeout)
old_dataReceived = self.dataReceived
def new_dataReceived(data):
if self.timeout_delayed is not None:
self.timeout_delayed.reset(100)
old_dataReceived(data)
self.dataReceived = new_dataReceived
self.factory.proto_connected(self)
self._stop_thread = deferral.run_repeatedly(lambda: [
self.send_ping(),
random.expovariate(1/100)][-1])
if self.node.advertise_ip:
self._stop_thread2 = deferral.run_repeatedly(lambda: [
self.send_addrme(port=self.node.serverfactory.listen_port.getHost().port) if self.node.serverfactory.listen_port is not None else None,
random.expovariate(1/(100*len(self.node.peers) + 1))][-1])
if best_share_hash is not None:
self.node.handle_share_hashes([best_share_hash], self)
def update_remote_view_of_my_known_txs(before, after):
added = set(after) - set(before)
removed = set(before) - set(after)
if added:
self.send_have_tx(tx_hashes=list(added))
if removed:
self.send_losing_tx(tx_hashes=list(removed))
# cache forgotten txs here for a little while so latency of "losing_tx" packets doesn't cause problems
key = max(self.known_txs_cache) + 1 if self.known_txs_cache else 0
self.known_txs_cache[key] = dict((h, before[h]) for h in removed)
reactor.callLater(20, self.known_txs_cache.pop, key)
watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)
self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))
self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())
def update_remote_view_of_my_mining_txs(before, after):
added = set(after) - set(before)
removed = set(before) - set(after)
if added:
self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(after[x]) for x in added)
assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
fragment(self.send_remember_tx, tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])
if removed:
self.send_forget_tx(tx_hashes=list(removed))
self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(before[x]) for x in removed)
watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)
self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))
self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(x) for x in self.node.mining_txs_var.value.values())
assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
fragment(self.send_remember_tx, tx_hashes=[], txs=self.node.mining_txs_var.value.values())
message_ping = pack.ComposedType([])
def handle_ping(self):
pass
message_addrme = pack.ComposedType([
('port', pack.IntType(16)),
])
def handle_addrme(self, port):
host = self.transport.getPeer().host
#print 'addrme from', host, port
if host == '127.0.0.1':
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrme(port=port) # services...
else:
self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrs(addrs=[
dict(
address=dict(
services=self.other_services,
address=host,
port=port,
),
timestamp=int(time.time()),
),
])
message_addrs = pack.ComposedType([
('addrs', pack.ListType(pack.ComposedType([
('timestamp', pack.IntType(64)),
('address', bitcoin_data.address_type),
]))),
])
def handle_addrs(self, addrs):
for addr_record in addrs:
self.node.got_addr((addr_record['address']['address'], addr_record['address']['port']), addr_record['address']['services'], min(int(time.time()), addr_record['timestamp']))
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrs(addrs=[addr_record])
message_getaddrs = pack.ComposedType([
('count', pack.IntType(32)),
])
def handle_getaddrs(self, count):
if count > 100:
count = 100
self.send_addrs(addrs=[
dict(
timestamp=int(self.node.addr_store[host, port][2]),
address=dict(
services=self.node.addr_store[host, port][0],
address=host,
port=port,
),
) for host, port in
self.node.get_good_peers(count)
])
message_shares = pack.ComposedType([
('shares', pack.ListType(p2pool_data.share_type)),
])
def handle_shares(self, shares):
result = []
for wrappedshare in shares:
if wrappedshare['type'] < p2pool_data.Share.VERSION: continue
share = p2pool_data.load_share(wrappedshare, self.node.net, self.addr)
if wrappedshare['type'] >= 13:
txs = []
for tx_hash in share.share_info['new_transaction_hashes']:
if tx_hash in self.node.known_txs_var.value:
tx = self.node.known_txs_var.value[tx_hash]
else:
for cache in self.known_txs_cache.itervalues():
if tx_hash in cache:
tx = cache[tx_hash]
print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)
break
else:
print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)
self.disconnect()
return
txs.append(tx)
else:
txs = None
result.append((share, txs))
self.node.handle_shares(result, self)
def sendShares(self, shares, tracker, known_txs, include_txs_with=[]):
tx_hashes = set()
for share in shares:
if share.VERSION >= 13:
# send full transaction for every new_transaction_hash that peer does not know
for tx_hash in share.share_info['new_transaction_hashes']:
assert tx_hash in known_txs, 'tried to broadcast share without knowing all its new transactions'
if tx_hash not in self.remote_tx_hashes:
tx_hashes.add(tx_hash)
continue
if share.hash in include_txs_with:
x = share.get_other_tx_hashes(tracker)
if x is not None:
tx_hashes.update(x)
hashes_to_send = [x for x in tx_hashes if x not in self.node.mining_txs_var.value and x in known_txs]
new_remote_remembered_txs_size = self.remote_remembered_txs_size + sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)
if new_remote_remembered_txs_size > self.max_remembered_txs_size:
raise ValueError('shares have too many txs')
self.remote_remembered_txs_size = new_remote_remembered_txs_size
fragment(self.send_remember_tx, tx_hashes=[x for x in hashes_to_send if x in self.remote_tx_hashes], txs=[known_txs[x] for x in hashes_to_send if x not in self.remote_tx_hashes])
fragment(self.send_shares, shares=[share.as_share() for share in shares])
self.send_forget_tx(tx_hashes=hashes_to_send)
self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)
message_sharereq = pack.ComposedType([
('id', pack.IntType(256)),
('hashes', pack.ListType(pack.IntType(256))),
('parents', pack.VarIntType()),
('stops', pack.ListType(pack.IntType(256))),
])
def handle_sharereq(self, id, hashes, parents, stops):
shares = self.node.handle_get_shares(hashes, parents, stops, self)
try:
self.send_sharereply(id=id, result='good', shares=[share.as_share() for share in shares])
except p2protocol.TooLong:
self.send_sharereply(id=id, result='too long', shares=[])
message_sharereply = pack.ComposedType([
('id', pack.IntType(256)),
('result', pack.EnumType(pack.VarIntType(), {0: 'good', 1: 'too long', 2: 'unk2', 3: 'unk3', 4: 'unk4', 5: 'unk5', 6: 'unk6'})),
('shares', pack.ListType(p2pool_data.share_type)),
])
class ShareReplyError(Exception): pass
def handle_sharereply(self, id, result, shares):
if result == 'good':
res = [p2pool_data.load_share(share, self.node.net, self.addr) for share in shares if share['type'] >= p2pool_data.Share.VERSION]
else:
res = failure.Failure(self.ShareReplyError(result))
self.get_shares.got_response(id, res)
message_bestblock = pack.ComposedType([
('header', bitcoin_data.block_header_type),
])
def handle_bestblock(self, header):
self.node.handle_bestblock(header, self)
message_have_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
])
def handle_have_tx(self, tx_hashes):
#assert self.remote_tx_hashes.isdisjoint(tx_hashes)
self.remote_tx_hashes.update(tx_hashes)
while len(self.remote_tx_hashes) > 10000:
self.remote_tx_hashes.pop()
message_losing_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
])
def handle_losing_tx(self, tx_hashes):
#assert self.remote_tx_hashes.issuperset(tx_hashes)
self.remote_tx_hashes.difference_update(tx_hashes)
message_remember_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
('txs', pack.ListType(bitcoin_data.tx_type)),
])
def handle_remember_tx(self, tx_hashes, txs):
for tx_hash in tx_hashes:
if tx_hash in self.remembered_txs:
print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
self.disconnect()
return
if tx_hash in self.node.known_txs_var.value:
tx = self.node.known_txs_var.value[tx_hash]
else:
for cache in self.known_txs_cache.itervalues():
if tx_hash in cache:
tx = cache[tx_hash]
print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)
break
else:
print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)
self.disconnect()
return
self.remembered_txs[tx_hash] = tx
self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)
new_known_txs = dict(self.node.known_txs_var.value)
warned = False
for tx in txs:
tx_hash = bitcoin_data.singlehash256(bitcoin_data.tx_type.pack(tx))
if tx_hash in self.remembered_txs:
print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
self.disconnect()
return
if tx_hash in self.node.known_txs_var.value and not warned:
print 'Peer sent entire transaction %064x that was already received' % (tx_hash,)
warned = True
self.remembered_txs[tx_hash] = tx
self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)
new_known_txs[tx_hash] = tx
self.node.known_txs_var.set(new_known_txs)
if self.remembered_txs_size >= self.max_remembered_txs_size:
raise PeerMisbehavingError('too much transaction data stored')
message_forget_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
])
def handle_forget_tx(self, tx_hashes):
for tx_hash in tx_hashes:
self.remembered_txs_size -= 100 + bitcoin_data.tx_type.packed_size(self.remembered_txs[tx_hash])
assert self.remembered_txs_size >= 0
del self.remembered_txs[tx_hash]
def connectionLost(self, reason):
self.connection_lost_event.happened()
if self.timeout_delayed is not None:
self.timeout_delayed.cancel()
if self.connected2:
self.factory.proto_disconnected(self, reason)
self._stop_thread()
if self.node.advertise_ip:
self._stop_thread2()
self.connected2 = False
self.factory.proto_lost_connection(self, reason)
if p2pool.DEBUG:
print "Peer connection lost:", self.addr, reason
self.get_shares.respond_all(reason)
@defer.inlineCallbacks
def do_ping(self):
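        # Measure round-trip latency by issuing a minimal share request and timing the reply.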
start = reactor.seconds()
yield self.get_shares(hashes=[0], parents=0, stops=[])
end = reactor.seconds()
defer.returnValue(end - start)
class ServerFactory(protocol.ServerFactory):
def __init__(self, node, max_conns):
self.node = node
self.max_conns = max_conns
self.conns = {}
self.running = False
self.listen_port = None
def buildProtocol(self, addr):
if sum(self.conns.itervalues()) >= self.max_conns or self.conns.get(self._host_to_ident(addr.host), 0) >= 3:
return None
if addr.host in self.node.bans and self.node.bans[addr.host] > time.time():
return None
p = Protocol(self.node, True)
p.factory = self
if p2pool.DEBUG:
print "Got peer connection from:", addr
return p
def _host_to_ident(self, host):
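        # Identify peers by the first two octets of their IPv4 address, so connection limits apply per /16 network.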
a, b, c, d = host.split('.')
return a, b
def proto_made_connection(self, proto):
ident = self._host_to_ident(proto.transport.getPeer().host)
self.conns[ident] = self.conns.get(ident, 0) + 1
def proto_lost_connection(self, proto, reason):
ident = self._host_to_ident(proto.transport.getPeer().host)
self.conns[ident] -= 1
if not self.conns[ident]:
del self.conns[ident]
def proto_connected(self, proto):
self.node.got_conn(proto)
def proto_disconnected(self, proto, reason):
self.node.lost_conn(proto, reason)
def start(self):
assert not self.running
self.running = True
def attempt_listen():
if self.running:
self.listen_port = reactor.listenTCP(self.node.port, self)
deferral.retry('Error binding to P2P port:', traceback=False)(attempt_listen)()
def stop(self):
assert self.running
self.running = False
return self.listen_port.stopListening()
class ClientFactory(protocol.ClientFactory):
def __init__(self, node, desired_conns, max_attempts):
self.node = node
self.desired_conns = desired_conns
self.max_attempts = max_attempts
self.attempts = set()
self.conns = set()
self.running = False
def _host_to_ident(self, host):
a, b, c, d = host.split('.')
return a, b
def buildProtocol(self, addr):
p = Protocol(self.node, False)
p.factory = self
return p
def startedConnecting(self, connector):
ident = self._host_to_ident(connector.getDestination().host)
if ident in self.attempts:
raise AssertionError('already have attempt')
self.attempts.add(ident)
def clientConnectionFailed(self, connector, reason):
self.attempts.remove(self._host_to_ident(connector.getDestination().host))
def clientConnectionLost(self, connector, reason):
self.attempts.remove(self._host_to_ident(connector.getDestination().host))
def proto_made_connection(self, proto):
pass
def proto_lost_connection(self, proto, reason):
pass
def proto_connected(self, proto):
self.conns.add(proto)
self.node.got_conn(proto)
def proto_disconnected(self, proto, reason):
self.conns.remove(proto)
self.node.lost_conn(proto, reason)
def start(self):
assert not self.running
self.running = True
self._stop_thinking = deferral.run_repeatedly(self._think)
def stop(self):
assert self.running
self.running = False
self._stop_thinking()
def _think(self):
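        # Periodically attempt one new outgoing connection while below the desired peer count.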
try:
if len(self.conns) < self.desired_conns and len(self.attempts) < self.max_attempts and self.node.addr_store:
(host, port), = self.node.get_good_peers(1)
if self._host_to_ident(host) in self.attempts:
pass
elif host in self.node.bans and self.node.bans[host] > time.time():
pass
else:
#print 'Trying to connect to', host, port
reactor.connectTCP(host, port, self, timeout=5)
except:
log.err()
return random.expovariate(1/1)
class SingleClientFactory(protocol.ReconnectingClientFactory):
def __init__(self, node):
self.node = node
def buildProtocol(self, addr):
p = Protocol(self.node, incoming=False)
p.factory = self
return p
def proto_made_connection(self, proto):
pass
def proto_lost_connection(self, proto, reason):
pass
def proto_connected(self, proto):
self.resetDelay()
self.node.got_conn(proto)
def proto_disconnected(self, proto, reason):
self.node.lost_conn(proto, reason)
class Node(object):
def __init__(self, best_share_hash_func, port, net, addr_store={}, connect_addrs=set(), desired_outgoing_conns=10, max_outgoing_attempts=30, max_incoming_conns=50, preferred_storage=1000, known_txs_var=variable.Variable({}), mining_txs_var=variable.Variable({}), advertise_ip=True):
self.best_share_hash_func = best_share_hash_func
self.port = port
self.net = net
self.addr_store = dict(addr_store)
self.connect_addrs = connect_addrs
self.preferred_storage = preferred_storage
self.known_txs_var = known_txs_var
self.mining_txs_var = mining_txs_var
self.advertise_ip = advertise_ip
self.traffic_happened = variable.Event()
self.nonce = random.randrange(2**64)
self.peers = {}
self.bans = {} # address -> end_time
self.clientfactory = ClientFactory(self, desired_outgoing_conns, max_outgoing_attempts)
self.serverfactory = ServerFactory(self, max_incoming_conns)
self.running = False
def start(self):
if self.running:
raise ValueError('already running')
self.clientfactory.start()
self.serverfactory.start()
self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]
self.running = True
self._stop_thinking = deferral.run_repeatedly(self._think)
def _think(self):
try:
if len(self.addr_store) < self.preferred_storage and self.peers:
random.choice(self.peers.values()).send_getaddrs(count=8)
except:
log.err()
return random.expovariate(1/20)
@defer.inlineCallbacks
def stop(self):
if not self.running:
raise ValueError('already stopped')
self.running = False
self._stop_thinking()
yield self.clientfactory.stop()
yield self.serverfactory.stop()
for singleclientconnector in self.singleclientconnectors:
yield singleclientconnector.factory.stopTrying()
yield singleclientconnector.disconnect()
del self.singleclientconnectors
def got_conn(self, conn):
if conn.nonce in self.peers:
raise ValueError('already have peer')
self.peers[conn.nonce] = conn
print '%s connection to peer %s:%i established. p2pool version: %i %r' % ('Incoming' if conn.incoming else 'Outgoing', conn.addr[0], conn.addr[1], conn.other_version, conn.other_sub_version)
def lost_conn(self, conn, reason):
if conn.nonce not in self.peers:
raise ValueError('''don't have peer''')
if conn is not self.peers[conn.nonce]:
raise ValueError('wrong conn')
del self.peers[conn.nonce]
print 'Lost peer %s:%i - %s' % (conn.addr[0], conn.addr[1], reason.getErrorMessage())
def got_addr(self, (host, port), services, timestamp):
if (host, port) in self.addr_store:
old_services, old_first_seen, old_last_seen = self.addr_store[host, port]
self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)
else:
if len(self.addr_store) < 10000:
self.addr_store[host, port] = services, timestamp, timestamp
def handle_shares(self, shares, peer):
print 'handle_shares', (shares, peer)
def handle_share_hashes(self, hashes, peer):
print 'handle_share_hashes', (hashes, peer)
def handle_get_shares(self, hashes, parents, stops, peer):
print 'handle_get_shares', (hashes, parents, stops, peer)
def handle_bestblock(self, header, peer):
print 'handle_bestblock', header
def get_good_peers(self, max_count):
t = time.time()
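        # Prefer addresses observed over a long span and seen recently; randomize the ranking with an exponential factor.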
return [x[0] for x in sorted(self.addr_store.iteritems(), key=lambda (k, (services, first_seen, last_seen)):
-math.log(max(3600, last_seen - first_seen))/math.log(max(3600, t - last_seen))*random.expovariate(1)
)][:max_count]
| [
"from __future__ import division\n",
"\n",
"import math\n",
"import random\n",
"import sys\n",
"import time\n",
"\n",
"from twisted.internet import defer, protocol, reactor\n",
"from twisted.python import failure, log\n",
"\n",
"import p2pool\n",
"from p2pool import data as p2pool_data\n",
"from p2pool.bitcoin import data as bitcoin_data\n",
"from p2pool.util import deferral, p2protocol, pack, variable\n",
"\n",
"class PeerMisbehavingError(Exception):\n",
" pass\n",
"\n",
"\n",
"def fragment(f, **kwargs):\n",
" try:\n",
" f(**kwargs)\n",
" except p2protocol.TooLong:\n",
" fragment(f, **dict((k, v[:len(v)//2]) for k, v in kwargs.iteritems()))\n",
" fragment(f, **dict((k, v[len(v)//2:]) for k, v in kwargs.iteritems()))\n",
"\n",
"class Protocol(p2protocol.Protocol):\n",
" VERSION = 1300\n",
" \n",
" max_remembered_txs_size = 2500000\n",
" \n",
" def __init__(self, node, incoming):\n",
" p2protocol.Protocol.__init__(self, node.net.PREFIX, 1000000, node.traffic_happened)\n",
" self.node = node\n",
" self.incoming = incoming\n",
" \n",
" self.other_version = None\n",
" self.connected2 = False\n",
" \n",
" def connectionMade(self):\n",
" self.factory.proto_made_connection(self)\n",
" \n",
" self.connection_lost_event = variable.Event()\n",
" \n",
" self.addr = self.transport.getPeer().host, self.transport.getPeer().port\n",
" \n",
" self.send_version(\n",
" version=self.VERSION,\n",
" services=0,\n",
" addr_to=dict(\n",
" services=0,\n",
" address=self.transport.getPeer().host,\n",
" port=self.transport.getPeer().port,\n",
" ),\n",
" addr_from=dict(\n",
" services=0,\n",
" address=self.transport.getHost().host,\n",
" port=self.transport.getHost().port,\n",
" ),\n",
" nonce=self.node.nonce,\n",
" sub_version=p2pool.__version__,\n",
" mode=1,\n",
" best_share_hash=self.node.best_share_hash_func(),\n",
" )\n",
" \n",
" self.timeout_delayed = reactor.callLater(10, self._connect_timeout)\n",
" \n",
" self.get_shares = deferral.GenericDeferrer(\n",
" max_id=2**256,\n",
" func=lambda id, hashes, parents, stops: self.send_sharereq(id=id, hashes=hashes, parents=parents, stops=stops),\n",
" timeout=15,\n",
" on_timeout=self.disconnect,\n",
" )\n",
" \n",
" self.remote_tx_hashes = set() # view of peer's known_txs # not actually initially empty, but sending txs instead of tx hashes won't hurt\n",
" self.remote_remembered_txs_size = 0\n",
" \n",
" self.remembered_txs = {} # view of peer's mining_txs\n",
" self.remembered_txs_size = 0\n",
" self.known_txs_cache = {}\n",
" \n",
" def _connect_timeout(self):\n",
" self.timeout_delayed = None\n",
" print 'Handshake timed out, disconnecting from %s:%i' % self.addr\n",
" self.disconnect()\n",
" \n",
" def packetReceived(self, command, payload2):\n",
" try:\n",
" if command != 'version' and not self.connected2:\n",
" raise PeerMisbehavingError('first message was not version message')\n",
" p2protocol.Protocol.packetReceived(self, command, payload2)\n",
" except PeerMisbehavingError, e:\n",
" print 'Peer %s:%i misbehaving, will drop and ban. Reason:' % self.addr, e.message\n",
" self.badPeerHappened()\n",
" \n",
" def badPeerHappened(self):\n",
" print \"Bad peer banned:\", self.addr\n",
" self.disconnect()\n",
" if self.transport.getPeer().host != '127.0.0.1': # never ban localhost\n",
" self.node.bans[self.transport.getPeer().host] = time.time() + 60*60\n",
" \n",
" def _timeout(self):\n",
" self.timeout_delayed = None\n",
" print 'Connection timed out, disconnecting from %s:%i' % self.addr\n",
" self.disconnect()\n",
" \n",
" message_version = pack.ComposedType([\n",
" ('version', pack.IntType(32)),\n",
" ('services', pack.IntType(64)),\n",
" ('addr_to', bitcoin_data.address_type),\n",
" ('addr_from', bitcoin_data.address_type),\n",
" ('nonce', pack.IntType(64)),\n",
" ('sub_version', pack.VarStrType()),\n",
" ('mode', pack.IntType(32)), # always 1 for legacy compatibility\n",
" ('best_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),\n",
" ])\n",
" def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):\n",
" if self.other_version is not None:\n",
" raise PeerMisbehavingError('more than one version message')\n",
" if version < 1300:\n",
" raise PeerMisbehavingError('peer too old')\n",
" \n",
" self.other_version = version\n",
" self.other_sub_version = sub_version[:512]\n",
" self.other_services = services\n",
" \n",
" if nonce == self.node.nonce:\n",
" raise PeerMisbehavingError('was connected to self')\n",
" if nonce in self.node.peers:\n",
" if p2pool.DEBUG:\n",
" print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr\n",
" self.disconnect()\n",
" return\n",
" \n",
" self.nonce = nonce\n",
" self.connected2 = True\n",
" \n",
" self.timeout_delayed.cancel()\n",
" self.timeout_delayed = reactor.callLater(100, self._timeout)\n",
" \n",
" old_dataReceived = self.dataReceived\n",
" def new_dataReceived(data):\n",
" if self.timeout_delayed is not None:\n",
" self.timeout_delayed.reset(100)\n",
" old_dataReceived(data)\n",
" self.dataReceived = new_dataReceived\n",
" \n",
" self.factory.proto_connected(self)\n",
" \n",
" self._stop_thread = deferral.run_repeatedly(lambda: [\n",
" self.send_ping(),\n",
" random.expovariate(1/100)][-1])\n",
" \n",
" if self.node.advertise_ip:\n",
" self._stop_thread2 = deferral.run_repeatedly(lambda: [\n",
" self.send_addrme(port=self.node.serverfactory.listen_port.getHost().port) if self.node.serverfactory.listen_port is not None else None,\n",
" random.expovariate(1/(100*len(self.node.peers) + 1))][-1])\n",
" \n",
" if best_share_hash is not None:\n",
" self.node.handle_share_hashes([best_share_hash], self)\n",
" \n",
" def update_remote_view_of_my_known_txs(before, after):\n",
" added = set(after) - set(before)\n",
" removed = set(before) - set(after)\n",
" if added:\n",
" self.send_have_tx(tx_hashes=list(added))\n",
" if removed:\n",
" self.send_losing_tx(tx_hashes=list(removed))\n",
" \n",
" # cache forgotten txs here for a little while so latency of \"losing_tx\" packets doesn't cause problems\n",
" key = max(self.known_txs_cache) + 1 if self.known_txs_cache else 0\n",
" self.known_txs_cache[key] = dict((h, before[h]) for h in removed)\n",
" reactor.callLater(20, self.known_txs_cache.pop, key)\n",
" watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)\n",
" self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))\n",
" \n",
" self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())\n",
" \n",
" def update_remote_view_of_my_mining_txs(before, after):\n",
" added = set(after) - set(before)\n",
" removed = set(before) - set(after)\n",
" if added:\n",
" self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(after[x]) for x in added)\n",
" assert self.remote_remembered_txs_size <= self.max_remembered_txs_size\n",
" fragment(self.send_remember_tx, tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])\n",
" if removed:\n",
" self.send_forget_tx(tx_hashes=list(removed))\n",
" self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(before[x]) for x in removed)\n",
" watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)\n",
" self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))\n",
" \n",
" self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(x) for x in self.node.mining_txs_var.value.values())\n",
" assert self.remote_remembered_txs_size <= self.max_remembered_txs_size\n",
" fragment(self.send_remember_tx, tx_hashes=[], txs=self.node.mining_txs_var.value.values())\n",
" \n",
" message_ping = pack.ComposedType([])\n",
" def handle_ping(self):\n",
" pass\n",
" \n",
" message_addrme = pack.ComposedType([\n",
" ('port', pack.IntType(16)),\n",
" ])\n",
" def handle_addrme(self, port):\n",
" host = self.transport.getPeer().host\n",
" #print 'addrme from', host, port\n",
" if host == '127.0.0.1':\n",
" if random.random() < .8 and self.node.peers:\n",
" random.choice(self.node.peers.values()).send_addrme(port=port) # services...\n",
" else:\n",
" self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))\n",
" if random.random() < .8 and self.node.peers:\n",
" random.choice(self.node.peers.values()).send_addrs(addrs=[\n",
" dict(\n",
" address=dict(\n",
" services=self.other_services,\n",
" address=host,\n",
" port=port,\n",
" ),\n",
" timestamp=int(time.time()),\n",
" ),\n",
" ])\n",
" \n",
" message_addrs = pack.ComposedType([\n",
" ('addrs', pack.ListType(pack.ComposedType([\n",
" ('timestamp', pack.IntType(64)),\n",
" ('address', bitcoin_data.address_type),\n",
" ]))),\n",
" ])\n",
" def handle_addrs(self, addrs):\n",
" for addr_record in addrs:\n",
" self.node.got_addr((addr_record['address']['address'], addr_record['address']['port']), addr_record['address']['services'], min(int(time.time()), addr_record['timestamp']))\n",
" if random.random() < .8 and self.node.peers:\n",
" random.choice(self.node.peers.values()).send_addrs(addrs=[addr_record])\n",
" \n",
" message_getaddrs = pack.ComposedType([\n",
" ('count', pack.IntType(32)),\n",
" ])\n",
" def handle_getaddrs(self, count):\n",
" if count > 100:\n",
" count = 100\n",
" self.send_addrs(addrs=[\n",
" dict(\n",
" timestamp=int(self.node.addr_store[host, port][2]),\n",
" address=dict(\n",
" services=self.node.addr_store[host, port][0],\n",
" address=host,\n",
" port=port,\n",
" ),\n",
" ) for host, port in\n",
" self.node.get_good_peers(count)\n",
" ])\n",
" \n",
" message_shares = pack.ComposedType([\n",
" ('shares', pack.ListType(p2pool_data.share_type)),\n",
" ])\n",
" def handle_shares(self, shares):\n",
" result = []\n",
" for wrappedshare in shares:\n",
" if wrappedshare['type'] < p2pool_data.Share.VERSION: continue\n",
" share = p2pool_data.load_share(wrappedshare, self.node.net, self.addr)\n",
" if wrappedshare['type'] >= 13:\n",
" txs = []\n",
" for tx_hash in share.share_info['new_transaction_hashes']:\n",
" if tx_hash in self.node.known_txs_var.value:\n",
" tx = self.node.known_txs_var.value[tx_hash]\n",
" else:\n",
" for cache in self.known_txs_cache.itervalues():\n",
" if tx_hash in cache:\n",
" tx = cache[tx_hash]\n",
" print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)\n",
" break\n",
" else:\n",
" print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)\n",
" self.disconnect()\n",
" return\n",
" txs.append(tx)\n",
" else:\n",
" txs = None\n",
" \n",
" result.append((share, txs))\n",
" \n",
" self.node.handle_shares(result, self)\n",
" \n",
" def sendShares(self, shares, tracker, known_txs, include_txs_with=[]):\n",
" tx_hashes = set()\n",
" for share in shares:\n",
" if share.VERSION >= 13:\n",
" # send full transaction for every new_transaction_hash that peer does not know\n",
" for tx_hash in share.share_info['new_transaction_hashes']:\n",
" assert tx_hash in known_txs, 'tried to broadcast share without knowing all its new transactions'\n",
" if tx_hash not in self.remote_tx_hashes:\n",
" tx_hashes.add(tx_hash)\n",
" continue\n",
" if share.hash in include_txs_with:\n",
" x = share.get_other_tx_hashes(tracker)\n",
" if x is not None:\n",
" tx_hashes.update(x)\n",
" \n",
" hashes_to_send = [x for x in tx_hashes if x not in self.node.mining_txs_var.value and x in known_txs]\n",
" \n",
" new_remote_remembered_txs_size = self.remote_remembered_txs_size + sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)\n",
" if new_remote_remembered_txs_size > self.max_remembered_txs_size:\n",
" raise ValueError('shares have too many txs')\n",
" self.remote_remembered_txs_size = new_remote_remembered_txs_size\n",
" \n",
" fragment(self.send_remember_tx, tx_hashes=[x for x in hashes_to_send if x in self.remote_tx_hashes], txs=[known_txs[x] for x in hashes_to_send if x not in self.remote_tx_hashes])\n",
" \n",
" fragment(self.send_shares, shares=[share.as_share() for share in shares])\n",
" \n",
" self.send_forget_tx(tx_hashes=hashes_to_send)\n",
" \n",
" self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)\n",
" \n",
" \n",
" message_sharereq = pack.ComposedType([\n",
" ('id', pack.IntType(256)),\n",
" ('hashes', pack.ListType(pack.IntType(256))),\n",
" ('parents', pack.VarIntType()),\n",
" ('stops', pack.ListType(pack.IntType(256))),\n",
" ])\n",
" def handle_sharereq(self, id, hashes, parents, stops):\n",
" shares = self.node.handle_get_shares(hashes, parents, stops, self)\n",
" try:\n",
" self.send_sharereply(id=id, result='good', shares=[share.as_share() for share in shares])\n",
" except p2protocol.TooLong:\n",
" self.send_sharereply(id=id, result='too long', shares=[])\n",
" \n",
" message_sharereply = pack.ComposedType([\n",
" ('id', pack.IntType(256)),\n",
" ('result', pack.EnumType(pack.VarIntType(), {0: 'good', 1: 'too long', 2: 'unk2', 3: 'unk3', 4: 'unk4', 5: 'unk5', 6: 'unk6'})),\n",
" ('shares', pack.ListType(p2pool_data.share_type)),\n",
" ])\n",
" class ShareReplyError(Exception): pass\n",
" def handle_sharereply(self, id, result, shares):\n",
" if result == 'good':\n",
" res = [p2pool_data.load_share(share, self.node.net, self.addr) for share in shares if share['type'] >= p2pool_data.Share.VERSION]\n",
" else:\n",
" res = failure.Failure(self.ShareReplyError(result))\n",
" self.get_shares.got_response(id, res)\n",
" \n",
" \n",
" message_bestblock = pack.ComposedType([\n",
" ('header', bitcoin_data.block_header_type),\n",
" ])\n",
" def handle_bestblock(self, header):\n",
" self.node.handle_bestblock(header, self)\n",
" \n",
" \n",
" message_have_tx = pack.ComposedType([\n",
" ('tx_hashes', pack.ListType(pack.IntType(256))),\n",
" ])\n",
" def handle_have_tx(self, tx_hashes):\n",
" #assert self.remote_tx_hashes.isdisjoint(tx_hashes)\n",
" self.remote_tx_hashes.update(tx_hashes)\n",
" while len(self.remote_tx_hashes) > 10000:\n",
" self.remote_tx_hashes.pop()\n",
" message_losing_tx = pack.ComposedType([\n",
" ('tx_hashes', pack.ListType(pack.IntType(256))),\n",
" ])\n",
" def handle_losing_tx(self, tx_hashes):\n",
" #assert self.remote_tx_hashes.issuperset(tx_hashes)\n",
" self.remote_tx_hashes.difference_update(tx_hashes)\n",
" \n",
" \n",
" message_remember_tx = pack.ComposedType([\n",
" ('tx_hashes', pack.ListType(pack.IntType(256))),\n",
" ('txs', pack.ListType(bitcoin_data.tx_type)),\n",
" ])\n",
" def handle_remember_tx(self, tx_hashes, txs):\n",
" for tx_hash in tx_hashes:\n",
" if tx_hash in self.remembered_txs:\n",
" print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'\n",
" self.disconnect()\n",
" return\n",
" \n",
" if tx_hash in self.node.known_txs_var.value:\n",
" tx = self.node.known_txs_var.value[tx_hash]\n",
" else:\n",
" for cache in self.known_txs_cache.itervalues():\n",
" if tx_hash in cache:\n",
" tx = cache[tx_hash]\n",
" print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)\n",
" break\n",
" else:\n",
" print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)\n",
" self.disconnect()\n",
" return\n",
" \n",
" self.remembered_txs[tx_hash] = tx\n",
" self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)\n",
" new_known_txs = dict(self.node.known_txs_var.value)\n",
" warned = False\n",
" for tx in txs:\n",
" tx_hash = bitcoin_data.singlehash256(bitcoin_data.tx_type.pack(tx))\n",
" if tx_hash in self.remembered_txs:\n",
" print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'\n",
" self.disconnect()\n",
" return\n",
" \n",
" if tx_hash in self.node.known_txs_var.value and not warned:\n",
" print 'Peer sent entire transaction %064x that was already received' % (tx_hash,)\n",
" warned = True\n",
" \n",
" self.remembered_txs[tx_hash] = tx\n",
" self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)\n",
" new_known_txs[tx_hash] = tx\n",
" self.node.known_txs_var.set(new_known_txs)\n",
" if self.remembered_txs_size >= self.max_remembered_txs_size:\n",
" raise PeerMisbehavingError('too much transaction data stored')\n",
" message_forget_tx = pack.ComposedType([\n",
" ('tx_hashes', pack.ListType(pack.IntType(256))),\n",
" ])\n",
" def handle_forget_tx(self, tx_hashes):\n",
" for tx_hash in tx_hashes:\n",
" self.remembered_txs_size -= 100 + bitcoin_data.tx_type.packed_size(self.remembered_txs[tx_hash])\n",
" assert self.remembered_txs_size >= 0\n",
" del self.remembered_txs[tx_hash]\n",
" \n",
" \n",
" def connectionLost(self, reason):\n",
" self.connection_lost_event.happened()\n",
" if self.timeout_delayed is not None:\n",
" self.timeout_delayed.cancel()\n",
" if self.connected2:\n",
" self.factory.proto_disconnected(self, reason)\n",
" self._stop_thread()\n",
" if self.node.advertise_ip:\n",
" self._stop_thread2()\n",
" self.connected2 = False\n",
" self.factory.proto_lost_connection(self, reason)\n",
" if p2pool.DEBUG:\n",
" print \"Peer connection lost:\", self.addr, reason\n",
" self.get_shares.respond_all(reason)\n",
" \n",
" @defer.inlineCallbacks\n",
" def do_ping(self):\n",
" start = reactor.seconds()\n",
" yield self.get_shares(hashes=[0], parents=0, stops=[])\n",
" end = reactor.seconds()\n",
" defer.returnValue(end - start)\n",
"\n",
"class ServerFactory(protocol.ServerFactory):\n",
" def __init__(self, node, max_conns):\n",
" self.node = node\n",
" self.max_conns = max_conns\n",
" \n",
" self.conns = {}\n",
" self.running = False\n",
" self.listen_port = None\n",
" \n",
" def buildProtocol(self, addr):\n",
" if sum(self.conns.itervalues()) >= self.max_conns or self.conns.get(self._host_to_ident(addr.host), 0) >= 3:\n",
" return None\n",
" if addr.host in self.node.bans and self.node.bans[addr.host] > time.time():\n",
" return None\n",
" p = Protocol(self.node, True)\n",
" p.factory = self\n",
" if p2pool.DEBUG:\n",
" print \"Got peer connection from:\", addr\n",
" return p\n",
" \n",
" def _host_to_ident(self, host):\n",
" a, b, c, d = host.split('.')\n",
" return a, b\n",
" \n",
" def proto_made_connection(self, proto):\n",
" ident = self._host_to_ident(proto.transport.getPeer().host)\n",
" self.conns[ident] = self.conns.get(ident, 0) + 1\n",
" def proto_lost_connection(self, proto, reason):\n",
" ident = self._host_to_ident(proto.transport.getPeer().host)\n",
" self.conns[ident] -= 1\n",
" if not self.conns[ident]:\n",
" del self.conns[ident]\n",
" \n",
" def proto_connected(self, proto):\n",
" self.node.got_conn(proto)\n",
" def proto_disconnected(self, proto, reason):\n",
" self.node.lost_conn(proto, reason)\n",
" \n",
" def start(self):\n",
" assert not self.running\n",
" self.running = True\n",
" \n",
" def attempt_listen():\n",
" if self.running:\n",
" self.listen_port = reactor.listenTCP(self.node.port, self)\n",
" deferral.retry('Error binding to P2P port:', traceback=False)(attempt_listen)()\n",
" \n",
" def stop(self):\n",
" assert self.running\n",
" self.running = False\n",
" \n",
" return self.listen_port.stopListening()\n",
"\n",
"class ClientFactory(protocol.ClientFactory):\n",
" def __init__(self, node, desired_conns, max_attempts):\n",
" self.node = node\n",
" self.desired_conns = desired_conns\n",
" self.max_attempts = max_attempts\n",
" \n",
" self.attempts = set()\n",
" self.conns = set()\n",
" self.running = False\n",
" \n",
" def _host_to_ident(self, host):\n",
" a, b, c, d = host.split('.')\n",
" return a, b\n",
" \n",
" def buildProtocol(self, addr):\n",
" p = Protocol(self.node, False)\n",
" p.factory = self\n",
" return p\n",
" \n",
" def startedConnecting(self, connector):\n",
" ident = self._host_to_ident(connector.getDestination().host)\n",
" if ident in self.attempts:\n",
" raise AssertionError('already have attempt')\n",
" self.attempts.add(ident)\n",
" \n",
" def clientConnectionFailed(self, connector, reason):\n",
" self.attempts.remove(self._host_to_ident(connector.getDestination().host))\n",
" \n",
" def clientConnectionLost(self, connector, reason):\n",
" self.attempts.remove(self._host_to_ident(connector.getDestination().host))\n",
" \n",
" def proto_made_connection(self, proto):\n",
" pass\n",
" def proto_lost_connection(self, proto, reason):\n",
" pass\n",
" \n",
" def proto_connected(self, proto):\n",
" self.conns.add(proto)\n",
" self.node.got_conn(proto)\n",
" def proto_disconnected(self, proto, reason):\n",
" self.conns.remove(proto)\n",
" self.node.lost_conn(proto, reason)\n",
" \n",
" def start(self):\n",
" assert not self.running\n",
" self.running = True\n",
" self._stop_thinking = deferral.run_repeatedly(self._think)\n",
" def stop(self):\n",
" assert self.running\n",
" self.running = False\n",
" self._stop_thinking()\n",
" \n",
" def _think(self):\n",
" try:\n",
" if len(self.conns) < self.desired_conns and len(self.attempts) < self.max_attempts and self.node.addr_store:\n",
" (host, port), = self.node.get_good_peers(1)\n",
" \n",
" if self._host_to_ident(host) in self.attempts:\n",
" pass\n",
" elif host in self.node.bans and self.node.bans[host] > time.time():\n",
" pass\n",
" else:\n",
" #print 'Trying to connect to', host, port\n",
" reactor.connectTCP(host, port, self, timeout=5)\n",
" except:\n",
" log.err()\n",
" \n",
" return random.expovariate(1/1)\n",
"\n",
"class SingleClientFactory(protocol.ReconnectingClientFactory):\n",
" def __init__(self, node):\n",
" self.node = node\n",
" \n",
" def buildProtocol(self, addr):\n",
" p = Protocol(self.node, incoming=False)\n",
" p.factory = self\n",
" return p\n",
" \n",
" def proto_made_connection(self, proto):\n",
" pass\n",
" def proto_lost_connection(self, proto, reason):\n",
" pass\n",
" \n",
" def proto_connected(self, proto):\n",
" self.resetDelay()\n",
" self.node.got_conn(proto)\n",
" def proto_disconnected(self, proto, reason):\n",
" self.node.lost_conn(proto, reason)\n",
"\n",
"class Node(object):\n",
" def __init__(self, best_share_hash_func, port, net, addr_store={}, connect_addrs=set(), desired_outgoing_conns=10, max_outgoing_attempts=30, max_incoming_conns=50, preferred_storage=1000, known_txs_var=variable.Variable({}), mining_txs_var=variable.Variable({}), advertise_ip=True):\n",
" self.best_share_hash_func = best_share_hash_func\n",
" self.port = port\n",
" self.net = net\n",
" self.addr_store = dict(addr_store)\n",
" self.connect_addrs = connect_addrs\n",
" self.preferred_storage = preferred_storage\n",
" self.known_txs_var = known_txs_var\n",
" self.mining_txs_var = mining_txs_var\n",
" self.advertise_ip = advertise_ip\n",
" \n",
" self.traffic_happened = variable.Event()\n",
" self.nonce = random.randrange(2**64)\n",
" self.peers = {}\n",
" self.bans = {} # address -> end_time\n",
" self.clientfactory = ClientFactory(self, desired_outgoing_conns, max_outgoing_attempts)\n",
" self.serverfactory = ServerFactory(self, max_incoming_conns)\n",
" self.running = False\n",
" \n",
" def start(self):\n",
" if self.running:\n",
" raise ValueError('already running')\n",
" \n",
" self.clientfactory.start()\n",
" self.serverfactory.start()\n",
" self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]\n",
" \n",
" self.running = True\n",
" \n",
" self._stop_thinking = deferral.run_repeatedly(self._think)\n",
" \n",
" def _think(self):\n",
" try:\n",
" if len(self.addr_store) < self.preferred_storage and self.peers:\n",
" random.choice(self.peers.values()).send_getaddrs(count=8)\n",
" except:\n",
" log.err()\n",
" \n",
" return random.expovariate(1/20)\n",
" \n",
" @defer.inlineCallbacks\n",
" def stop(self):\n",
" if not self.running:\n",
" raise ValueError('already stopped')\n",
" \n",
" self.running = False\n",
" \n",
" self._stop_thinking()\n",
" yield self.clientfactory.stop()\n",
" yield self.serverfactory.stop()\n",
" for singleclientconnector in self.singleclientconnectors:\n",
" yield singleclientconnector.factory.stopTrying()\n",
" yield singleclientconnector.disconnect()\n",
" del self.singleclientconnectors\n",
" \n",
" def got_conn(self, conn):\n",
" if conn.nonce in self.peers:\n",
" raise ValueError('already have peer')\n",
" self.peers[conn.nonce] = conn\n",
" \n",
" print '%s connection to peer %s:%i established. p2pool version: %i %r' % ('Incoming' if conn.incoming else 'Outgoing', conn.addr[0], conn.addr[1], conn.other_version, conn.other_sub_version)\n",
" \n",
" def lost_conn(self, conn, reason):\n",
" if conn.nonce not in self.peers:\n",
" raise ValueError('''don't have peer''')\n",
" if conn is not self.peers[conn.nonce]:\n",
" raise ValueError('wrong conn')\n",
" del self.peers[conn.nonce]\n",
" \n",
" print 'Lost peer %s:%i - %s' % (conn.addr[0], conn.addr[1], reason.getErrorMessage())\n",
" \n",
" \n",
" def got_addr(self, (host, port), services, timestamp):\n",
" if (host, port) in self.addr_store:\n",
" old_services, old_first_seen, old_last_seen = self.addr_store[host, port]\n",
" self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)\n",
" else:\n",
" if len(self.addr_store) < 10000:\n",
" self.addr_store[host, port] = services, timestamp, timestamp\n",
" \n",
" def handle_shares(self, shares, peer):\n",
" print 'handle_shares', (shares, peer)\n",
" \n",
" def handle_share_hashes(self, hashes, peer):\n",
" print 'handle_share_hashes', (hashes, peer)\n",
" \n",
" def handle_get_shares(self, hashes, parents, stops, peer):\n",
" print 'handle_get_shares', (hashes, parents, stops, peer)\n",
" \n",
" def handle_bestblock(self, header, peer):\n",
" print 'handle_bestblock', header\n",
" \n",
" def get_good_peers(self, max_count):\n",
" t = time.time()\n",
" return [x[0] for x in sorted(self.addr_store.iteritems(), key=lambda (k, (services, first_seen, last_seen)):\n",
" -math.log(max(3600, last_seen - first_seen))/math.log(max(3600, t - last_seen))*random.expovariate(1)\n",
" )][:max_count]\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0.2,
0,
0.2,
0,
0.010869565217391304,
0,
0,
0.1111111111111111,
0,
0,
0.2,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0.012345679012345678,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0,
0.008064516129032258,
0,
0,
0,
0.1111111111111111,
0.013793103448275862,
0,
0.1111111111111111,
0.01639344262295082,
0,
0,
0.2,
0,
0,
0,
0,
0.2,
0,
0,
0,
0.011904761904761904,
0,
0,
0.010638297872340425,
0,
0.2,
0,
0,
0,
0.012658227848101266,
0,
0.2,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0.013888888888888888,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0.027777777777777776,
0,
0,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0,
0.025,
0.1111111111111111,
0,
0,
0.006578947368421052,
0.014084507042253521,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.008403361344537815,
0.012048192771084338,
0.012195121951219513,
0,
0.01020408163265306,
0.009523809523809525,
0.1111111111111111,
0,
0.1111111111111111,
0,
0,
0,
0,
0.008333333333333333,
0.011494252873563218,
0.005780346820809248,
0,
0,
0.008130081300813009,
0.009900990099009901,
0.009345794392523364,
0.1111111111111111,
0.007194244604316547,
0,
0.010101010101010102,
0.2,
0,
0.037037037037037035,
0,
0.2,
0,
0,
0,
0.02857142857142857,
0,
0.024390243902439025,
0,
0,
0.021505376344086023,
0,
0.009174311926605505,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0.005405405405405406,
0,
0.011363636363636364,
0.2,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0.02702702702702703,
0,
0,
0.013513513513513514,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0,
0,
0.008333333333333333,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0.07692307692307693,
0,
0.2,
0,
0,
0,
0,
0.010526315789473684,
0,
0.008547008547008548,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.00909090909090909,
0.1111111111111111,
0.006369426751592357,
0,
0,
0,
0.1111111111111111,
0.0053475935828877,
0.1111111111111111,
0.012195121951219513,
0.1111111111111111,
0,
0.1111111111111111,
0.008,
0.2,
0.2,
0.023255813953488372,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0.00980392156862745,
0,
0,
0.2,
0,
0,
0.0072992700729927005,
0,
0,
0.023255813953488372,
0.018867924528301886,
0,
0.007042253521126761,
0,
0,
0,
0.2,
0.2,
0.022727272727272728,
0,
0,
0.025,
0,
0.2,
0.2,
0.023809523809523808,
0,
0,
0.024390243902439025,
0.016666666666666666,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0.016666666666666666,
0,
0.2,
0.2,
0.021739130434782608,
0,
0,
0,
0.02,
0,
0,
0.011494252873563218,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0.008928571428571428,
0,
0,
0.07692307692307693,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0.07692307692307693,
0,
0.01020408163265306,
0,
0.07692307692307693,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0.009174311926605505,
0,
0,
0.2,
0.2,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.2,
0,
0.008547008547008548,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0.2,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0.2,
0,
0,
0.02040816326530612,
0,
0.2,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.011363636363636364,
0.2,
0,
0,
0,
0.1111111111111111,
0,
0,
0.022222222222222223,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.2,
0,
0,
0,
0.2,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0.2,
0,
0.012048192771084338,
0.2,
0,
0.012048192771084338,
0.2,
0,
0,
0.019230769230769232,
0,
0.2,
0,
0,
0,
0.02040816326530612,
0,
0,
0.2,
0,
0,
0,
0,
0.05,
0,
0,
0,
0.2,
0,
0,
0.008264462809917356,
0,
0.058823529411764705,
0,
0,
0.011904761904761904,
0,
0,
0.016129032258064516,
0,
0.0625,
0,
0.1111111111111111,
0,
0,
0.015873015873015872,
0,
0,
0.2,
0,
0,
0,
0,
0.2,
0,
0,
0.019230769230769232,
0,
0.2,
0,
0,
0,
0.02040816326530612,
0,
0,
0.05,
0.003484320557491289,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.022222222222222223,
0.010416666666666666,
0,
0,
0.2,
0,
0,
0,
0.1111111111111111,
0,
0,
0.007407407407407408,
0.1111111111111111,
0,
0.1111111111111111,
0,
0.2,
0,
0,
0,
0,
0.0625,
0,
0.1111111111111111,
0,
0.2,
0,
0,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0.1111111111111111,
0.005025125628140704,
0.2,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.010638297872340425,
0.2,
0.2,
0.01694915254237288,
0,
0.011627906976744186,
0.01020408163265306,
0,
0,
0,
0.2,
0,
0,
0.2,
0,
0,
0.2,
0,
0,
0.2,
0,
0,
0.2,
0,
0,
0.008547008547008548,
0.017543859649122806,
0
] | 681 | 0.026582 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
import frappe.desk.query_report
from frappe.utils import cint
from frappe.model.document import Document
from frappe.modules.export_file import export_to_files
from frappe.modules import make_boilerplate
class Report(Document):
def validate(self):
"""only administrator can save standard report"""
if not self.module:
self.module = frappe.db.get_value("DocType", self.ref_doctype, "module")
if not self.is_standard:
self.is_standard = "No"
if frappe.session.user=="Administrator" and getattr(frappe.local.conf, 'developer_mode',0)==1:
self.is_standard = "Yes"
if self.is_standard == "No" and frappe.db.get_value("Report", self.name, "is_standard") == "Yes":
frappe.throw(_("Cannot edit a standard report. Please duplicate and create a new report"))
if self.is_standard == "Yes" and frappe.session.user!="Administrator":
frappe.throw(_("Only Administrator can save a standard report. Please rename and save."))
if self.report_type in ("Query Report", "Script Report") \
and frappe.session.user!="Administrator":
frappe.throw(_("Only Administrator allowed to create Query / Script Reports"))
if self.report_type == "Report Builder":
self.update_report_json()
def on_update(self):
self.export_doc()
def update_report_json(self):
if self.json:
data = json.loads(self.json)
data["add_total_row"] = self.add_total_row
self.json = json.dumps(data)
def export_doc(self):
if frappe.flags.in_import:
return
if self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:
export_to_files(record_list=[['Report', self.name]],
record_module=self.module)
self.create_report_py()
def create_report_py(self):
if self.report_type == "Script Report":
make_boilerplate("controller.py", self, {"name": self.name})
make_boilerplate("controller.js", self, {"name": self.name})
def get_data(self, filters=None, limit=None, user=None):
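		# Build the report rows (header row first): run query/script reports via query_report, or translate Report Builder JSON into a list query.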
out = []
if self.report_type in ('Query Report', 'Script Report'):
# query and script reports
data = frappe.desk.query_report.run(self.name, filters=filters, user=user)
columns_list = []
for d in data.get('columns'):
if isinstance(d, dict):
columns_list.append(d.get('label'))
else:
columns_list.append(d.split(':')[0])
out.append(columns_list)
out += data.get('result')
else:
# standard report
params = json.loads(self.json)
columns = params.get('columns')
filters = params.get('filters')
def _format(parts):
				# sort_by is saved as DocType.fieldname; convert it to SQL
return '`tab{0}`.`{1}`'.format(*parts)
order_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')
if params.get('sort_by_next'):
order_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')
result = frappe.get_list(self.ref_doctype, fields = [_format([c[1], c[0]]) for c in columns],
filters=filters, order_by = order_by, as_list=True, limit=limit, user=user)
meta = frappe.get_meta(self.ref_doctype)
out.append([meta.get_label(c[0]) for c in columns])
out = out + [list(d) for d in result]
return out
@Document.whitelist
def toggle_disable(self, disable):
self.db_set("disabled", cint(disable))
| [
"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n",
"# MIT License. See license.txt\n",
"\n",
"from __future__ import unicode_literals\n",
"import frappe\n",
"import json\n",
"from frappe import _\n",
"import frappe.desk.query_report\n",
"from frappe.utils import cint\n",
"from frappe.model.document import Document\n",
"from frappe.modules.export_file import export_to_files\n",
"from frappe.modules import make_boilerplate\n",
"\n",
"class Report(Document):\n",
"\tdef validate(self):\n",
"\t\t\"\"\"only administrator can save standard report\"\"\"\n",
"\t\tif not self.module:\n",
"\t\t\tself.module = frappe.db.get_value(\"DocType\", self.ref_doctype, \"module\")\n",
"\n",
"\t\tif not self.is_standard:\n",
"\t\t\tself.is_standard = \"No\"\n",
"\t\t\tif frappe.session.user==\"Administrator\" and getattr(frappe.local.conf, 'developer_mode',0)==1:\n",
"\t\t\t\tself.is_standard = \"Yes\"\n",
"\n",
"\t\tif self.is_standard == \"No\" and frappe.db.get_value(\"Report\", self.name, \"is_standard\") == \"Yes\":\n",
"\t\t\tfrappe.throw(_(\"Cannot edit a standard report. Please duplicate and create a new report\"))\n",
"\n",
"\t\tif self.is_standard == \"Yes\" and frappe.session.user!=\"Administrator\":\n",
"\t\t\tfrappe.throw(_(\"Only Administrator can save a standard report. Please rename and save.\"))\n",
"\n",
"\t\tif self.report_type in (\"Query Report\", \"Script Report\") \\\n",
"\t\t\tand frappe.session.user!=\"Administrator\":\n",
"\t\t\tfrappe.throw(_(\"Only Administrator allowed to create Query / Script Reports\"))\n",
"\n",
"\t\tif self.report_type == \"Report Builder\":\n",
"\t\t\tself.update_report_json()\n",
"\n",
"\tdef on_update(self):\n",
"\t\tself.export_doc()\n",
"\n",
"\tdef update_report_json(self):\n",
"\t\tif self.json:\n",
"\t\t\tdata = json.loads(self.json)\n",
"\t\t\tdata[\"add_total_row\"] = self.add_total_row\n",
"\t\t\tself.json = json.dumps(data)\n",
"\n",
"\tdef export_doc(self):\n",
"\t\tif frappe.flags.in_import:\n",
"\t\t\treturn\n",
"\n",
"\t\tif self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:\n",
"\t\t\texport_to_files(record_list=[['Report', self.name]],\n",
"\t\t\t\trecord_module=self.module)\n",
"\n",
"\t\t\tself.create_report_py()\n",
"\n",
"\tdef create_report_py(self):\n",
"\t\tif self.report_type == \"Script Report\":\n",
"\t\t\tmake_boilerplate(\"controller.py\", self, {\"name\": self.name})\n",
"\t\t\tmake_boilerplate(\"controller.js\", self, {\"name\": self.name})\n",
"\n",
"\tdef get_data(self, filters=None, limit=None, user=None):\n",
"\n",
"\t\tout = []\n",
"\n",
"\t\tif self.report_type in ('Query Report', 'Script Report'):\n",
"\t\t\t# query and script reports\n",
"\t\t\tdata = frappe.desk.query_report.run(self.name, filters=filters, user=user)\n",
"\t\t\tcolumns_list = []\n",
"\t\t\tfor d in data.get('columns'):\n",
"\t\t\t\tif isinstance(d, dict):\n",
"\t\t\t\t\tcolumns_list.append(d.get('label'))\n",
"\t\t\t\telse:\n",
"\t\t\t\t\tcolumns_list.append(d.split(':')[0])\n",
"\n",
"\t\t\tout.append(columns_list)\n",
"\t\t\tout += data.get('result')\n",
"\t\telse:\n",
"\t\t\t# standard report\n",
"\t\t\tparams = json.loads(self.json)\n",
"\t\t\tcolumns = params.get('columns')\n",
"\t\t\tfilters = params.get('filters')\n",
"\n",
"\t\t\tdef _format(parts):\n",
"\t\t\t\t# sort by is saved as DocType.fieldname, covert it to sql\n",
"\t\t\t\treturn '`tab{0}`.`{1}`'.format(*parts)\n",
"\n",
"\t\t\torder_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')\n",
"\t\t\tif params.get('sort_by_next'):\n",
"\t\t\t\torder_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')\n",
"\n",
"\t\t\tresult = frappe.get_list(self.ref_doctype, fields = [_format([c[1], c[0]]) for c in columns],\n",
"\t\t\t\tfilters=filters, order_by = order_by, as_list=True, limit=limit, user=user)\n",
"\n",
"\t\t\tmeta = frappe.get_meta(self.ref_doctype)\n",
"\n",
"\t\t\tout.append([meta.get_label(c[0]) for c in columns])\n",
"\t\t\tout = out + [list(d) for d in result]\n",
"\n",
"\t\treturn out\n",
"\n",
"\n",
"\[email protected]\n",
"\tdef toggle_disable(self, disable):\n",
"\t\tself.db_set(\"disabled\", cint(disable))\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0.047619047619047616,
0.019230769230769232,
0.045454545454545456,
0.013157894736842105,
0,
0.037037037037037035,
0.037037037037037035,
0.05102040816326531,
0.034482758620689655,
0,
0.02,
0.02127659574468085,
0,
0.0273972602739726,
0.021505376344086023,
0,
0.01639344262295082,
0.044444444444444446,
0.024390243902439025,
0,
0.023255813953488372,
0.034482758620689655,
0,
0.045454545454545456,
0.05,
0,
0.03225806451612903,
0.0625,
0.03125,
0.021739130434782608,
0.03125,
0,
0.043478260869565216,
0.034482758620689655,
0.1,
0,
0.022727272727272728,
0.017857142857142856,
0.06451612903225806,
0,
0.037037037037037035,
0,
0.034482758620689655,
0.023809523809523808,
0.015625,
0.015625,
0,
0.017241379310344827,
0,
0.09090909090909091,
0,
0.016666666666666666,
0.03333333333333333,
0.01282051282051282,
0.047619047619047616,
0.030303030303030304,
0.03571428571428571,
0.024390243902439025,
0.1,
0.023809523809523808,
0,
0.03571428571428571,
0.034482758620689655,
0.125,
0.047619047619047616,
0.029411764705882353,
0.02857142857142857,
0.02857142857142857,
0,
0.043478260869565216,
0.016129032258064516,
0.023255813953488372,
0,
0.02247191011235955,
0.029411764705882353,
0.018518518518518517,
0,
0.041237113402061855,
0.05,
0,
0.022727272727272728,
0,
0.01818181818181818,
0.024390243902439025,
0,
0.07692307692307693,
0,
0,
0.09523809523809523,
0.027777777777777776,
0.024390243902439025
] | 105 | 0.024193 |
import sys
import os
import random
from raytracer.cartesian import *
from raytracer.colour import *
from raytracer.matrix import *
from raytracer.light import *
from raytracer.output import *
from raytracer.shape import *
from raytracer.view import *
from raytracer.scene import *
from raytracer.quadraticshapes import *
from raytracer.planarshapes import *
from raytracer.lighting_model import *
from raytracer.mapping import *
if __name__ == '__main__':
get_context().precision = 32
scene = Scene(True)
view = view_create(scene, -1, {'left': 0,
'right': 300,
'top': 0,
'bottom': 300},
# {'left':.1, 'right':.1, 'top':.1, 'bottom':.1}),
{'left': -5, 'right': 5, 'top': -5, 'bottom': 5})
view_set_antialias (view, False, 5, 5, True) #, True, .4)
view_set_output(view, PIL_Output())
view_set_multiprocessing(view, False, 2)
view_set_lighting_model (view, view[VIEW_LIGHTINGMODEL], {'NoShadows': True, 'NoDiffuse': False})
scene.add_view(view, 'view')
scene.add_light(light_point_light_create(cartesian_create(
0, 0, -5.5), colour_create(1, 1, 1)), 'light1')
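    # Scatter several hundred spheres with random banded spiral textures, sizes, orientations and depths.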
for i in range(1, 500):
bands = []
count = int(round(random.random()*5,0))
for j in range (0, count+2):
bands.append(('colour', random.uniform(.5,1), random.uniform(.5,1), random.uniform(.5,1)))
sphere = shape_sphere_create(
('colour_mapping', sphere_map_to_rect,
BandedSprialTexture(bands)),
colour_create(0,0,0))
z = random.uniform(2, 10)
f = 2.5 #+s (z /1.4)
shape_set_transform(sphere, Transform({
'scale': {'x': random.uniform(.5, 3.5), 'y': random.uniform(.5, 3.5), 'z': random.uniform(.5, 3.5)},
'rotate': {'vector': cartesian_create(
random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)),
'angle':random.uniform(0, 360) },
'translate': {'x': random.uniform(-f, f), 'y': random.uniform(-f, f), 'z': z}
}))
scene.add_shape(sphere, 'sphere_%i'%i)
image = scene.render('view')
image.show()
| [
"import sys\n",
"import os\n",
"import random\n",
"from raytracer.cartesian import *\n",
"from raytracer.colour import *\n",
"from raytracer.matrix import *\n",
"from raytracer.light import *\n",
"from raytracer.output import *\n",
"from raytracer.shape import *\n",
"from raytracer.view import *\n",
"from raytracer.scene import *\n",
"from raytracer.quadraticshapes import *\n",
"from raytracer.planarshapes import *\n",
"from raytracer.lighting_model import *\n",
"from raytracer.mapping import *\n",
"\n",
"if __name__ == '__main__':\n",
" get_context().precision = 32\n",
"\n",
" scene = Scene(True)\n",
" view = view_create(scene, -1, {'left': 0,\n",
" 'right': 300,\n",
" 'top': 0,\n",
" 'bottom': 300},\n",
" # {'left':.1, 'right':.1, 'top':.1, 'bottom':.1}),\n",
" {'left': -5, 'right': 5, 'top': -5, 'bottom': 5})\n",
"\n",
" \n",
" view_set_antialias (view, False, 5, 5, True) #, True, .4)\n",
" view_set_output(view, PIL_Output())\n",
" view_set_multiprocessing(view, False, 2)\n",
" view_set_lighting_model (view, view[VIEW_LIGHTINGMODEL], {'NoShadows': True, 'NoDiffuse': False})\n",
" scene.add_view(view, 'view')\n",
" scene.add_light(light_point_light_create(cartesian_create(\n",
" 0, 0, -5.5), colour_create(1, 1, 1)), 'light1')\n",
"\n",
" \n",
" for i in range(1, 500):\n",
" \n",
" bands = []\n",
" count = int(round(random.random()*5,0))\n",
" for j in range (0, count+2):\n",
" bands.append(('colour', random.uniform(.5,1), random.uniform(.5,1), random.uniform(.5,1)))\n",
"\n",
"\n",
" sphere = shape_sphere_create(\n",
" ('colour_mapping', sphere_map_to_rect,\n",
" BandedSprialTexture(bands)),\n",
" colour_create(0,0,0))\n",
" \n",
" \n",
" z = random.uniform(2, 10)\n",
" f = 2.5 #+s (z /1.4)\n",
" \n",
" shape_set_transform(sphere, Transform({\n",
" 'scale': {'x': random.uniform(.5, 3.5), 'y': random.uniform(.5, 3.5), 'z': random.uniform(.5, 3.5)},\n",
" 'rotate': {'vector': cartesian_create(\n",
" random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)),\n",
" 'angle':random.uniform(0, 360) },\n",
" 'translate': {'x': random.uniform(-f, f), 'y': random.uniform(-f, f), 'z': z}\n",
" }))\n",
"\n",
"\n",
" scene.add_shape(sphere, 'sphere_%i'%i)\n",
"\n",
" \n",
" image = scene.render('view')\n",
" image.show()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0.02564102564102564,
0.022222222222222223,
0,
0,
0,
0.041666666666666664,
0.06451612903225806,
0,
0,
0.0196078431372549,
0,
0,
0.019230769230769232,
0,
0.2,
0.03571428571428571,
0.2,
0,
0.020833333333333332,
0.02702702702702703,
0.038834951456310676,
0,
0,
0.02631578947368421,
0,
0.024390243902439025,
0.058823529411764705,
0.1111111111111111,
0.1111111111111111,
0.05714285714285714,
0.06896551724137931,
0.07692307692307693,
0,
0.008849557522123894,
0,
0.01098901098901099,
0.05172413793103448,
0.011111111111111112,
0,
0,
0,
0.0425531914893617,
0,
0.1111111111111111,
0.030303030303030304,
0
] | 68 | 0.022647 |
import prairielearn as pl
import lxml.html
from html import escape
import chevron
import os
LANGUAGE_DEFAULT = None
NO_HIGHLIGHT_DEFAULT = False
SOURCE_FILE_NAME_DEFAULT = None
PREVENT_SELECT_DEFAULT = False
HIGHLIGHT_LINES_DEFAULT = None
HIGHLIGHT_LINES_COLOR_DEFAULT = '#b3d7ff'
allowed_languages = [
'armasm',
'bash',
'cpp',
'csharp',
'css',
'excel',
'fortran',
'go',
'haskell',
'html',
'ini',
'java',
'javascript',
'json',
'julia',
'makefile',
'markdown',
'mathematica',
'matlab',
'mipsasm',
'objectivec',
'ocaml',
'perl',
'php',
'plaintext',
'python',
'r',
'ruby',
'shell',
'sql',
'tex',
'x86asm',
'yaml',
]
def parse_highlight_lines(highlight_lines):
"""
Parses a string like "1", "1-4", "1-3,5,7-8" into lists of tuples like
[(1,1)], [(1,4)], and [(1,3),(5,5),(7,8)]
"""
lines = []
components = highlight_lines.split(',')
for component in components:
component = component.strip()
try:
line = int(component)
lines.append((line, line))
except ValueError:
# Try parsing as "##-###"
numbers = component.split('-')
if len(numbers) != 2:
return None
try:
start = int(numbers[0])
end = int(numbers[1])
lines.append((start, end))
except ValueError:
return None
return lines
def line_should_be_highlighted(line_number, lines_to_highlight):
"""
Takes an array like that produced by parse_highlight_lines and determines
if a line of code satisfies the range.
"""
for pair in lines_to_highlight:
start, end = pair
if line_number >= start and line_number <= end:
return True
return False
def highlight_lines_in_code(code, highlight_lines, color):
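    # Rebuild the code with each highlighted line wrapped in a colored span; other lines pass through unchanged.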
lines_to_highlight = parse_highlight_lines(highlight_lines)
code_lines = code.splitlines()
line_number = 1
result_lines = ''
for line in code_lines:
if line_should_be_highlighted(line_number, lines_to_highlight):
if len(line.strip()) == 0:
# insert line break to prevent collapsing the line
line = '<br>'
result_lines += '<span class="pl-code-highlighted-line" style="background-color: ' + color + ';">' + line + '\n</span>'
else:
result_lines += line + '\n'
line_number += 1
return result_lines
def prepare(element_html, data):
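    # Validate attributes: known language, no inline code when source-file-name is used, and parseable highlight-lines.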
element = lxml.html.fragment_fromstring(element_html)
required_attribs = []
optional_attribs = ['language', 'no-highlight', 'source-file-name', 'prevent-select', 'highlight-lines', 'highlight-lines-color']
pl.check_attribs(element, required_attribs, optional_attribs)
language = pl.get_string_attrib(element, 'language', LANGUAGE_DEFAULT)
if language is not None:
if language not in allowed_languages:
raise Exception(f'Unknown language: "{language}". Must be one of {",".join(allowed_languages)}')
source_file_name = pl.get_string_attrib(element, 'source-file-name', SOURCE_FILE_NAME_DEFAULT)
if source_file_name is not None:
if element.text is not None and not str(element.text).isspace():
raise Exception('Existing code cannot be added inside html element when "source-file-name" attribute is used.')
highlight_lines = pl.get_string_attrib(element, 'highlight-lines', HIGHLIGHT_LINES_DEFAULT)
if highlight_lines is not None:
if parse_highlight_lines(highlight_lines) is None:
raise Exception('Could not parse highlight-lines attribute; check your syntax')
def render(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
language = pl.get_string_attrib(element, 'language', LANGUAGE_DEFAULT)
no_highlight = pl.get_boolean_attrib(element, 'no-highlight', NO_HIGHLIGHT_DEFAULT)
specify_language = (language is not None) and (not no_highlight)
source_file_name = pl.get_string_attrib(element, 'source-file-name', SOURCE_FILE_NAME_DEFAULT)
prevent_select = pl.get_boolean_attrib(element, 'prevent-select', PREVENT_SELECT_DEFAULT)
highlight_lines = pl.get_string_attrib(element, 'highlight-lines', HIGHLIGHT_LINES_DEFAULT)
highlight_lines_color = pl.get_string_attrib(element, 'highlight-lines-color', HIGHLIGHT_LINES_COLOR_DEFAULT)
if source_file_name is not None:
base_path = data['options']['question_path']
file_path = os.path.join(base_path, source_file_name)
if not os.path.exists(file_path):
raise Exception(f'Unknown file path: "{file_path}".')
f = open(file_path, 'r')
code = ''
for line in f.readlines():
code += line
code = code[:-1]
f.close()
# Automatically escape code in file source (important for: html/xml).
code = escape(code)
else:
# Strip a single leading newline from the code, if present. This
# avoids having spurious newlines because of HTML like:
#
# <pl-code>
# some_code
# </pl-code>
#
# which technically starts with a newline, but we probably
# don't want a blank line at the start of the code block.
code = pl.inner_html(element)
if len(code) > 1 and code[0] == '\r' and code[1] == '\n':
code = code[2:]
elif len(code) > 0 and (code[0] == '\n' or code[0] == '\r'):
code = code[1:]
if highlight_lines is not None:
code = highlight_lines_in_code(code, highlight_lines, highlight_lines_color)
html_params = {
'specify_language': specify_language,
'language': language,
'no_highlight': no_highlight,
'code': code,
'prevent_select': prevent_select,
}
with open('pl-code.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
return html
| [
"import prairielearn as pl\n",
"import lxml.html\n",
"from html import escape\n",
"import chevron\n",
"import os\n",
"\n",
"LANGUAGE_DEFAULT = None\n",
"NO_HIGHLIGHT_DEFAULT = False\n",
"SOURCE_FILE_NAME_DEFAULT = None\n",
"PREVENT_SELECT_DEFAULT = False\n",
"HIGHLIGHT_LINES_DEFAULT = None\n",
"HIGHLIGHT_LINES_COLOR_DEFAULT = '#b3d7ff'\n",
"\n",
"allowed_languages = [\n",
" 'armasm',\n",
" 'bash',\n",
" 'cpp',\n",
" 'csharp',\n",
" 'css',\n",
" 'excel',\n",
" 'fortran',\n",
" 'go',\n",
" 'haskell',\n",
" 'html',\n",
" 'ini',\n",
" 'java',\n",
" 'javascript',\n",
" 'json',\n",
" 'julia',\n",
" 'makefile',\n",
" 'markdown',\n",
" 'mathematica',\n",
" 'matlab',\n",
" 'mipsasm',\n",
" 'objectivec',\n",
" 'ocaml',\n",
" 'perl',\n",
" 'php',\n",
" 'plaintext',\n",
" 'python',\n",
" 'r',\n",
" 'ruby',\n",
" 'shell',\n",
" 'sql',\n",
" 'tex',\n",
" 'x86asm',\n",
" 'yaml',\n",
"]\n",
"\n",
"\n",
"def parse_highlight_lines(highlight_lines):\n",
" \"\"\"\n",
" Parses a string like \"1\", \"1-4\", \"1-3,5,7-8\" into lists of tuples like\n",
" [(1,1)], [(1,4)], and [(1,3),(5,5),(7,8)]\n",
" \"\"\"\n",
" lines = []\n",
" components = highlight_lines.split(',')\n",
" for component in components:\n",
" component = component.strip()\n",
" try:\n",
" line = int(component)\n",
" lines.append((line, line))\n",
" except ValueError:\n",
" # Try parsing as \"##-###\"\n",
" numbers = component.split('-')\n",
" if len(numbers) != 2:\n",
" return None\n",
" try:\n",
" start = int(numbers[0])\n",
" end = int(numbers[1])\n",
" lines.append((start, end))\n",
" except ValueError:\n",
" return None\n",
" return lines\n",
"\n",
"\n",
"def line_should_be_highlighted(line_number, lines_to_highlight):\n",
" \"\"\"\n",
" Takes an array like that produced by parse_highlight_lines and determines\n",
" if a line of code satisfies the range.\n",
" \"\"\"\n",
" for pair in lines_to_highlight:\n",
" start, end = pair\n",
" if line_number >= start and line_number <= end:\n",
" return True\n",
" return False\n",
"\n",
"\n",
"def highlight_lines_in_code(code, highlight_lines, color):\n",
" lines_to_highlight = parse_highlight_lines(highlight_lines)\n",
" code_lines = code.splitlines()\n",
" line_number = 1\n",
" result_lines = ''\n",
" for line in code_lines:\n",
" if line_should_be_highlighted(line_number, lines_to_highlight):\n",
" if len(line.strip()) == 0:\n",
" # insert line break to prevent collapsing the line\n",
" line = '<br>'\n",
" result_lines += '<span class=\"pl-code-highlighted-line\" style=\"background-color: ' + color + ';\">' + line + '\\n</span>'\n",
" else:\n",
" result_lines += line + '\\n'\n",
" line_number += 1\n",
" return result_lines\n",
"\n",
"\n",
"def prepare(element_html, data):\n",
" element = lxml.html.fragment_fromstring(element_html)\n",
" required_attribs = []\n",
" optional_attribs = ['language', 'no-highlight', 'source-file-name', 'prevent-select', 'highlight-lines', 'highlight-lines-color']\n",
" pl.check_attribs(element, required_attribs, optional_attribs)\n",
"\n",
" language = pl.get_string_attrib(element, 'language', LANGUAGE_DEFAULT)\n",
" if language is not None:\n",
" if language not in allowed_languages:\n",
" raise Exception(f'Unknown language: \"{language}\". Must be one of {\",\".join(allowed_languages)}')\n",
"\n",
" source_file_name = pl.get_string_attrib(element, 'source-file-name', SOURCE_FILE_NAME_DEFAULT)\n",
" if source_file_name is not None:\n",
" if element.text is not None and not str(element.text).isspace():\n",
" raise Exception('Existing code cannot be added inside html element when \"source-file-name\" attribute is used.')\n",
"\n",
" highlight_lines = pl.get_string_attrib(element, 'highlight-lines', HIGHLIGHT_LINES_DEFAULT)\n",
" if highlight_lines is not None:\n",
" if parse_highlight_lines(highlight_lines) is None:\n",
" raise Exception('Could not parse highlight-lines attribute; check your syntax')\n",
"\n",
"\n",
"def render(element_html, data):\n",
" element = lxml.html.fragment_fromstring(element_html)\n",
" language = pl.get_string_attrib(element, 'language', LANGUAGE_DEFAULT)\n",
" no_highlight = pl.get_boolean_attrib(element, 'no-highlight', NO_HIGHLIGHT_DEFAULT)\n",
" specify_language = (language is not None) and (not no_highlight)\n",
" source_file_name = pl.get_string_attrib(element, 'source-file-name', SOURCE_FILE_NAME_DEFAULT)\n",
" prevent_select = pl.get_boolean_attrib(element, 'prevent-select', PREVENT_SELECT_DEFAULT)\n",
" highlight_lines = pl.get_string_attrib(element, 'highlight-lines', HIGHLIGHT_LINES_DEFAULT)\n",
" highlight_lines_color = pl.get_string_attrib(element, 'highlight-lines-color', HIGHLIGHT_LINES_COLOR_DEFAULT)\n",
"\n",
" if source_file_name is not None:\n",
" base_path = data['options']['question_path']\n",
" file_path = os.path.join(base_path, source_file_name)\n",
" if not os.path.exists(file_path):\n",
" raise Exception(f'Unknown file path: \"{file_path}\".')\n",
" f = open(file_path, 'r')\n",
" code = ''\n",
" for line in f.readlines():\n",
" code += line\n",
" code = code[:-1]\n",
" f.close()\n",
" # Automatically escape code in file source (important for: html/xml).\n",
" code = escape(code)\n",
" else:\n",
" # Strip a single leading newline from the code, if present. This\n",
" # avoids having spurious newlines because of HTML like:\n",
" #\n",
" # <pl-code>\n",
" # some_code\n",
" # </pl-code>\n",
" #\n",
" # which technically starts with a newline, but we probably\n",
" # don't want a blank line at the start of the code block.\n",
" code = pl.inner_html(element)\n",
" if len(code) > 1 and code[0] == '\\r' and code[1] == '\\n':\n",
" code = code[2:]\n",
" elif len(code) > 0 and (code[0] == '\\n' or code[0] == '\\r'):\n",
" code = code[1:]\n",
"\n",
" if highlight_lines is not None:\n",
" code = highlight_lines_in_code(code, highlight_lines, highlight_lines_color)\n",
"\n",
" html_params = {\n",
" 'specify_language': specify_language,\n",
" 'language': language,\n",
" 'no_highlight': no_highlight,\n",
" 'code': code,\n",
" 'prevent_select': prevent_select,\n",
" }\n",
"\n",
" with open('pl-code.mustache', 'r', encoding='utf-8') as f:\n",
" html = chevron.render(f, html_params).strip()\n",
"\n",
" return html\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007575757575757576,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007462686567164179,
0,
0,
0,
0,
0,
0.009174311926605505,
0,
0.010101010101010102,
0,
0,
0.008064516129032258,
0,
0.010416666666666666,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0.010101010101010102,
0.010638297872340425,
0.010416666666666666,
0.008771929824561403,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 181 | 0.0007 |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Quantum register reference object.
"""
from ._register import Register
class QuantumRegister(Register):
"""Implement a quantum register."""
def qasm(self):
"""Return OPENQASM string for this register."""
return "qreg %s[%d];" % (self.name, self.size)
def __str__(self):
"""Return a string representing the register."""
return "QuantumRegister(%s,%d)" % (self.name, self.size)
def __len__(self):
"""Return a int representing the size."""
return self.size
| [
"# -*- coding: utf-8 -*-\r\n",
"\r\n",
"# Copyright 2017 IBM RESEARCH. All Rights Reserved.\r\n",
"#\r\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n",
"# you may not use this file except in compliance with the License.\r\n",
"# You may obtain a copy of the License at\r\n",
"#\r\n",
"# http://www.apache.org/licenses/LICENSE-2.0\r\n",
"#\r\n",
"# Unless required by applicable law or agreed to in writing, software\r\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n",
"# See the License for the specific language governing permissions and\r\n",
"# limitations under the License.\r\n",
"# =============================================================================\r\n",
"\r\n",
"\"\"\"\r\n",
"Quantum register reference object.\r\n",
"\"\"\"\r\n",
"from ._register import Register\r\n",
"\r\n",
"\r\n",
"class QuantumRegister(Register):\r\n",
" \"\"\"Implement a quantum register.\"\"\"\r\n",
"\r\n",
" def qasm(self):\r\n",
" \"\"\"Return OPENQASM string for this register.\"\"\"\r\n",
" return \"qreg %s[%d];\" % (self.name, self.size)\r\n",
"\r\n",
" def __str__(self):\r\n",
" \"\"\"Return a string representing the register.\"\"\"\r\n",
" return \"QuantumRegister(%s,%d)\" % (self.name, self.size)\r\n",
"\r\n",
" def __len__(self):\r\n",
" \"\"\"Return a int representing the size.\"\"\"\r\n",
" return self.size\r\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 37 | 0 |
#!/usr/bin/python
# ex:set fileencoding=utf-8:
# flake8: noqa
from __future__ import unicode_literals
from django.test import LiveServerTestCase
from django.core.urlresolvers import reverse
from .apps import QuotationConfig
from .models import Quotation
from .models import QuotationProduct
from djangobmf.utils.testcases import DemoDataMixin
from djangobmf.utils.testcases import TestCase
from djangobmf.utils.testcases import ModuleMixin
from djangobmf.utils.testcases import ModuleTestFactory
class QuotationFactory(ModuleTestFactory, DemoDataMixin, TestCase):
app = QuotationConfig
class QuotationModuleTests(ModuleMixin, TestCase):
def test_urls_user(self):
"""
"""
self.model = Quotation
# data = self.autotest_ajax_get('create', kwargs={'key': 'default'})
# data = self.autotest_ajax_post('create', kwargs={'key': 'default'}, data={
# 'project': 1,
# 'customer': 1,
# 'date': '2012-01-01',
# 'employee': 1,
# 'bmf-products-TOTAL_FORMS': 1,
# 'bmf-products-INITIAL_FORMS': 0,
# 'bmf-products-MAX_NUM_FORMS': 1,
# 'bmf-products-0-product': 1,
# 'bmf-products-0-amount': 1,
# 'bmf-products-0-price': 100,
# 'bmf-products-0-name': "Service",
# })
# data = self.autotest_ajax_post('create', kwargs={'key': 'default'}, data={
# 'project': 2,
# 'customer': 2,
# 'date': '2012-01-01',
# 'employee': 1,
# 'bmf-products-TOTAL_FORMS': 1,
# 'bmf-products-INITIAL_FORMS': 0,
# 'bmf-products-MAX_NUM_FORMS': 1,
# 'bmf-products-0-product': 1,
# 'bmf-products-0-amount': 10,
# 'bmf-products-0-price': 10,
# 'bmf-products-0-name': "Service",
# })
# obj = self.get_latest_object()
# self.autotest_get('detail', kwargs={'pk': obj.pk}, api=False)
# data = self.autotest_ajax_get('update', kwargs={'pk': obj.pk})
# self.autotest_get('workflow', status_code=302, kwargs={'pk': obj.pk, 'transition': 'cancel'})
# self.autotest_get('delete', kwargs={'pk': obj.pk})
# self.autotest_post('delete', status_code=302, kwargs={'pk': obj.pk})
# obj = self.get_latest_object()
# self.autotest_get('workflow', status_code=302, kwargs={'pk': obj.pk, 'transition': 'send'})
# self.autotest_get('workflow', status_code=302, kwargs={'pk': obj.pk, 'transition': 'accept'})
# self.autotest_get('workflow', status_code=302, kwargs={'pk': obj.pk, 'transition': 'invoice'})
def test_cleans(self):
obj = Quotation()
obj.clean()
# obj = QuotationProduct()
# obj.product_id = 1
# obj.clean()
# self.assertIsNotNone(obj.name, "name should be read from product")
# self.assertIsNotNone(obj.price, "price should be read from product")
| [
"#!/usr/bin/python\n",
"# ex:set fileencoding=utf-8:\n",
"# flake8: noqa\n",
"\n",
"from __future__ import unicode_literals\n",
"\n",
"from django.test import LiveServerTestCase\n",
"from django.core.urlresolvers import reverse\n",
"\n",
"from .apps import QuotationConfig\n",
"from .models import Quotation\n",
"from .models import QuotationProduct\n",
"\n",
"from djangobmf.utils.testcases import DemoDataMixin\n",
"from djangobmf.utils.testcases import TestCase\n",
"from djangobmf.utils.testcases import ModuleMixin\n",
"from djangobmf.utils.testcases import ModuleTestFactory\n",
"\n",
"\n",
"class QuotationFactory(ModuleTestFactory, DemoDataMixin, TestCase):\n",
" app = QuotationConfig\n",
"\n",
"\n",
"class QuotationModuleTests(ModuleMixin, TestCase):\n",
"\n",
" def test_urls_user(self):\n",
" \"\"\"\n",
" \"\"\"\n",
" self.model = Quotation\n",
"\n",
"# data = self.autotest_ajax_get('create', kwargs={'key': 'default'})\n",
"# data = self.autotest_ajax_post('create', kwargs={'key': 'default'}, data={\n",
"# 'project': 1,\n",
"# 'customer': 1,\n",
"# 'date': '2012-01-01',\n",
"# 'employee': 1,\n",
"# 'bmf-products-TOTAL_FORMS': 1,\n",
"# 'bmf-products-INITIAL_FORMS': 0,\n",
"# 'bmf-products-MAX_NUM_FORMS': 1,\n",
"# 'bmf-products-0-product': 1,\n",
"# 'bmf-products-0-amount': 1,\n",
"# 'bmf-products-0-price': 100,\n",
"# 'bmf-products-0-name': \"Service\",\n",
"# })\n",
"# data = self.autotest_ajax_post('create', kwargs={'key': 'default'}, data={\n",
"# 'project': 2,\n",
"# 'customer': 2,\n",
"# 'date': '2012-01-01',\n",
"# 'employee': 1,\n",
"# 'bmf-products-TOTAL_FORMS': 1,\n",
"# 'bmf-products-INITIAL_FORMS': 0,\n",
"# 'bmf-products-MAX_NUM_FORMS': 1,\n",
"# 'bmf-products-0-product': 1,\n",
"# 'bmf-products-0-amount': 10,\n",
"# 'bmf-products-0-price': 10,\n",
"# 'bmf-products-0-name': \"Service\",\n",
"# })\n",
"\n",
"# obj = self.get_latest_object()\n",
"\n",
"# self.autotest_get('detail', kwargs={'pk': obj.pk}, api=False)\n",
"# data = self.autotest_ajax_get('update', kwargs={'pk': obj.pk})\n",
"# self.autotest_get('workflow', status_code=302, kwargs={'pk': obj.pk, 'transition': 'cancel'})\n",
"# self.autotest_get('delete', kwargs={'pk': obj.pk})\n",
"# self.autotest_post('delete', status_code=302, kwargs={'pk': obj.pk})\n",
"\n",
"# obj = self.get_latest_object()\n",
"\n",
"# self.autotest_get('workflow', status_code=302, kwargs={'pk': obj.pk, 'transition': 'send'})\n",
"# self.autotest_get('workflow', status_code=302, kwargs={'pk': obj.pk, 'transition': 'accept'})\n",
"# self.autotest_get('workflow', status_code=302, kwargs={'pk': obj.pk, 'transition': 'invoice'})\n",
"\n",
" def test_cleans(self):\n",
" obj = Quotation()\n",
" obj.clean()\n",
"\n",
"# obj = QuotationProduct()\n",
"# obj.product_id = 1\n",
"# obj.clean()\n",
"# self.assertIsNotNone(obj.name, \"name should be read from product\")\n",
"# self.assertIsNotNone(obj.price, \"price should be read from product\")\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0.01,
0.00980392156862745,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 81 | 0.000783 |
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc.
import contextlib
import uuid
import webob.exc
from neutron import manager
from neutron.plugins.nuage.extensions import netpartition as netpart_ext
from neutron.tests.unit.nuage import test_nuage_plugin
from neutron.tests.unit import test_extensions
class NetPartitionTestExtensionManager(object):
def get_resources(self):
return netpart_ext.Netpartition.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class NetPartitionTestCase(test_nuage_plugin.NuagePluginV2TestCase):
def setUp(self):
ext_mgr = NetPartitionTestExtensionManager()
super(NetPartitionTestCase, self).setUp()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def _make_netpartition(self, fmt, name):
data = {
'net_partition': {
'name': name,
'tenant_id': uuid.uuid4()
}
}
netpart_req = self.new_create_request('net-partitions', data, fmt)
resp = netpart_req.get_response(self.ext_api)
if resp.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=resp.status_int)
return self.deserialize(fmt, resp)
def _del_netpartition(self, id):
self._delete('net-partitions', id)
@contextlib.contextmanager
def netpartition(self, name='netpartition1',
do_delete=True,
fmt=None,
**kwargs):
netpart = self._make_netpartition(fmt or self.fmt, name)
yield netpart
if do_delete:
self._del_netpartition(netpart['net_partition']['id'])
def test_create_netpartition(self):
name = 'netpart1'
keys = [('name', name)]
with self.netpartition(name=name) as netpart:
for k, v in keys:
self.assertEqual(netpart['net_partition'][k], v)
def test_delete_netpartition(self):
name = 'netpart1'
netpart = self._make_netpartition(self.fmt, name)
req = self.new_delete_request('net-partitions',
netpart['net_partition']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_show_netpartition(self):
with self.netpartition(name='netpart1') as npart:
req = self.new_show_request('net-partitions',
npart['net_partition']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['net_partition']['name'],
npart['net_partition']['name'])
def test_create_existing_default_netpartition(self):
name = 'default_test_np'
netpart1 = self._make_netpartition(self.fmt, name)
nuage_plugin = manager.NeutronManager.get_plugin()
netpart2 = nuage_plugin._create_default_net_partition(name)
self.assertEqual(netpart1['net_partition']['name'],
netpart2['name'])
| [
"# Copyright 2014 Alcatel-Lucent USA Inc.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"#\n",
"# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc.\n",
"\n",
"import contextlib\n",
"import uuid\n",
"import webob.exc\n",
"\n",
"from neutron import manager\n",
"from neutron.plugins.nuage.extensions import netpartition as netpart_ext\n",
"from neutron.tests.unit.nuage import test_nuage_plugin\n",
"from neutron.tests.unit import test_extensions\n",
"\n",
"\n",
"class NetPartitionTestExtensionManager(object):\n",
" def get_resources(self):\n",
" return netpart_ext.Netpartition.get_resources()\n",
"\n",
" def get_actions(self):\n",
" return []\n",
"\n",
" def get_request_extensions(self):\n",
" return []\n",
"\n",
"\n",
"class NetPartitionTestCase(test_nuage_plugin.NuagePluginV2TestCase):\n",
" def setUp(self):\n",
" ext_mgr = NetPartitionTestExtensionManager()\n",
" super(NetPartitionTestCase, self).setUp()\n",
" self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)\n",
"\n",
" def _make_netpartition(self, fmt, name):\n",
" data = {\n",
" 'net_partition': {\n",
" 'name': name,\n",
" 'tenant_id': uuid.uuid4()\n",
" }\n",
" }\n",
" netpart_req = self.new_create_request('net-partitions', data, fmt)\n",
" resp = netpart_req.get_response(self.ext_api)\n",
" if resp.status_int >= webob.exc.HTTPClientError.code:\n",
" raise webob.exc.HTTPClientError(code=resp.status_int)\n",
" return self.deserialize(fmt, resp)\n",
"\n",
" def _del_netpartition(self, id):\n",
" self._delete('net-partitions', id)\n",
"\n",
" @contextlib.contextmanager\n",
" def netpartition(self, name='netpartition1',\n",
" do_delete=True,\n",
" fmt=None,\n",
" **kwargs):\n",
" netpart = self._make_netpartition(fmt or self.fmt, name)\n",
"\n",
" yield netpart\n",
" if do_delete:\n",
" self._del_netpartition(netpart['net_partition']['id'])\n",
"\n",
" def test_create_netpartition(self):\n",
" name = 'netpart1'\n",
" keys = [('name', name)]\n",
" with self.netpartition(name=name) as netpart:\n",
" for k, v in keys:\n",
" self.assertEqual(netpart['net_partition'][k], v)\n",
"\n",
" def test_delete_netpartition(self):\n",
" name = 'netpart1'\n",
" netpart = self._make_netpartition(self.fmt, name)\n",
" req = self.new_delete_request('net-partitions',\n",
" netpart['net_partition']['id'])\n",
" res = req.get_response(self.ext_api)\n",
" self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)\n",
"\n",
" def test_show_netpartition(self):\n",
" with self.netpartition(name='netpart1') as npart:\n",
" req = self.new_show_request('net-partitions',\n",
" npart['net_partition']['id'])\n",
" res = self.deserialize(self.fmt, req.get_response(self.ext_api))\n",
" self.assertEqual(res['net_partition']['name'],\n",
" npart['net_partition']['name'])\n",
"\n",
" def test_create_existing_default_netpartition(self):\n",
" name = 'default_test_np'\n",
" netpart1 = self._make_netpartition(self.fmt, name)\n",
" nuage_plugin = manager.NeutronManager.get_plugin()\n",
" netpart2 = nuage_plugin._create_default_net_partition(name)\n",
" self.assertEqual(netpart1['net_partition']['name'],\n",
" netpart2['name'])\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 100 | 0 |
import logging
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.expression import desc
from sqlalchemy import Column, Boolean, func, or_
from flask import flash
from flask.ext.admin._compat import string_types
from flask.ext.admin.babel import gettext, ngettext, lazy_gettext
from flask.ext.admin.model import BaseModelView
from flask.ext.admin.actions import action
from flask.ext.admin._backwards import ObsoleteAttr
from flask.ext.admin.contrib.sqla import form, filters, tools
from .typefmt import DEFAULT_FORMATTERS
from .tools import is_inherited_primary_key, get_column_for_current_model, get_query_for_ids
from .ajax import create_ajax_loader
# Set up logger
log = logging.getLogger("flask-admin.sqla")
class ModelView(BaseModelView):
"""
SQLAlchemy model view
Usage sample::
admin = Admin()
admin.add_view(ModelView(User, db.session))
"""
column_auto_select_related = ObsoleteAttr('column_auto_select_related',
'auto_select_related',
True)
"""
Enable automatic detection of displayed foreign keys in this view
and perform automatic joined loading for related models to improve
query performance.
Please note that detection is not recursive: if `__unicode__` method
of related model uses another model to generate string representation, it
will still make separate database call.
"""
column_select_related_list = ObsoleteAttr('column_select_related',
'list_select_related',
None)
"""
List of parameters for SQLAlchemy `subqueryload`. Overrides `column_auto_select_related`
property.
For example::
class PostAdmin(ModelView):
column_select_related_list = ('user', 'city')
You can also use properties::
class PostAdmin(ModelView):
column_select_related_list = (Post.user, Post.city)
Please refer to the `subqueryload` on list of possible values.
"""
column_display_all_relations = ObsoleteAttr('column_display_all_relations',
'list_display_all_relations',
False)
"""
Controls if list view should display all relations, not only many-to-one.
"""
column_searchable_list = ObsoleteAttr('column_searchable_list',
'searchable_columns',
None)
"""
Collection of the searchable columns. Only text-based columns
are searchable (`String`, `Unicode`, `Text`, `UnicodeText`).
Example::
class MyModelView(ModelView):
column_searchable_list = ('name', 'email')
You can also pass columns::
class MyModelView(ModelView):
column_searchable_list = (User.name, User.email)
The following search rules apply:
- If you enter *ZZZ* in the UI search field, it will generate *ILIKE '%ZZZ%'*
statement against searchable columns.
- If you enter multiple words, each word will be searched separately, but
only rows that contain all words will be displayed. For example, searching
for 'abc def' will find all rows that contain 'abc' and 'def' in one or
more columns.
- If you prefix your search term with ^, it will find all rows
that start with the search term. So, if you entered *^ZZZ*, *ILIKE 'ZZZ%'* will be used.
- If you prefix your search term with =, it will perform an exact match.
For example, if you entered *=ZZZ*, the statement *ILIKE 'ZZZ'* will be used.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of :class:`flask.ext.admin.contrib.sqla.filters.BaseFilter` classes.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'))
"""
model_form_converter = form.AdminModelConverter
"""
Model form conversion class. Use this to implement custom field conversion logic.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
inline_model_form_converter = form.InlineModelConverter
"""
Inline model conversion class. If you need some kind of post-processing for inline
forms, you can customize behavior by doing something like this::
class MyInlineModelConverter(AdminModelConverter):
def post_process(self, form_class, info):
form_class.value = wtf.TextField('value')
return form_class
class MyAdminView(ModelView):
inline_model_form_converter = MyInlineModelConverter
"""
filter_converter = filters.FilterConverter()
"""
Field to filter converter.
Override this attribute to use non-default converter.
"""
fast_mass_delete = False
"""
If set to `False` and user deletes more than one model using built in action,
all models will be read from the database and then deleted one by one
giving SQLAlchemy a chance to manually cleanup any dependencies (many-to-many
relationships, etc).
If set to `True`, will run a `DELETE` statement which is somewhat faster,
but may leave corrupted data if you forget to configure `DELETE
CASCADE` for your model.
"""
inline_models = None
"""
Inline related-model editing for models with parent-child relations.
Accepts enumerable with one of the following possible values:
1. Child model class::
class MyModelView(ModelView):
inline_models = (Post,)
2. Child model class and additional options::
class MyModelView(ModelView):
inline_models = [(Post, dict(form_columns=['title']))]
3. Django-like ``InlineFormAdmin`` class instance::
class MyInlineModelForm(InlineFormAdmin):
form_columns = ('title', 'date')
class MyModelView(ModelView):
inline_models = (MyInlineModelForm(MyInlineModel),)
You can customize the generated field name by:
1. Using the `form_name` property as a key to the options dictionary:
class MyModelView(ModelView):
inline_models = ((Post, dict(form_label='Hello')))
2. Using forward relation name and `column_labels` property:
class Model1(Base):
pass
class Model2(Base):
# ...
model1 = relation(Model1, backref='models')
class MyModel1View(Base):
inline_models = (Model2,)
column_labels = {'models': 'Hello'}
"""
column_type_formatters = DEFAULT_FORMATTERS
form_choices = None
"""
Map choices to form fields
Example::
class MyModelView(BaseModelView):
form_choices = {'my_form_field': [
('db_value', 'display_value'),
]
"""
form_optional_types = (Boolean,)
"""
List of field types that should be optional if column is not nullable.
Example::
class MyModelView(BaseModelView):
form_optional_types = (Boolean, Unicode)
"""
def __init__(self, model, session,
name=None, category=None, endpoint=None, url=None):
"""
Constructor.
:param model:
Model class
:param session:
SQLAlchemy session
:param name:
View name. If not set, defaults to the model name
:param category:
Category name
:param endpoint:
Endpoint name. If not set, defaults to the model name
:param url:
Base URL. If not set, defaults to '/admin/' + endpoint
"""
self.session = session
self._search_fields = None
self._search_joins = dict()
self._filter_joins = dict()
if self.form_choices is None:
self.form_choices = {}
super(ModelView, self).__init__(model, name, category, endpoint, url)
# Primary key
self._primary_key = self.scaffold_pk()
if self._primary_key is None:
raise Exception('Model %s does not have primary key.' % self.model.__name__)
# Configuration
if not self.column_select_related_list:
self._auto_joins = self.scaffold_auto_joins()
else:
self._auto_joins = self.column_select_related_list
# Internal API
def _get_model_iterator(self, model=None):
"""
Return property iterator for the model
"""
if model is None:
model = self.model
return model._sa_class_manager.mapper.iterate_properties
# Scaffolding
def scaffold_pk(self):
"""
Return the primary key name from a model
PK can be a single value or a tuple if multiple PKs exist
"""
return tools.get_primary_key(self.model)
def get_pk_value(self, model):
"""
Return the PK value from a model object.
PK can be a single value or a tuple if multiple PKs exist
"""
try:
return getattr(model, self._primary_key)
except TypeError:
v = []
for attr in self._primary_key:
v.append(getattr(model, attr))
return tuple(v)
def scaffold_list_columns(self):
"""
Return a list of columns from the model.
"""
columns = []
for p in self._get_model_iterator():
# Verify type
if hasattr(p, 'direction'):
if self.column_display_all_relations or p.direction.name == 'MANYTOONE':
columns.append(p.key)
elif hasattr(p, 'columns'):
column_inherited_primary_key = False
if len(p.columns) != 1:
if is_inherited_primary_key(p):
column = get_column_for_current_model(p)
else:
raise TypeError('Can not convert multiple-column properties (%s.%s)' % (self.model, p.key))
else:
# Grab column
column = p.columns[0]
# An inherited primary key has a foreign key as well
if column.foreign_keys and not is_inherited_primary_key(p):
continue
if not self.column_display_pk and column.primary_key:
continue
columns.append(p.key)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns.
Key is column name, value is sort column/field.
"""
columns = dict()
for p in self._get_model_iterator():
if hasattr(p, 'columns'):
# Sanity check
if len(p.columns) > 1:
# Multi-column properties are not supported
continue
column = p.columns[0]
# Can't sort on primary or foreign keys by default
if column.foreign_keys:
continue
if not self.column_display_pk and column.primary_key:
continue
columns[p.key] = column
return columns
def _get_columns_for_field(self, field):
if isinstance(field, string_types):
attr = getattr(self.model, field, None)
if attr is None:
raise Exception('Field %s was not found.' % field)
else:
attr = field
if (not attr or
not hasattr(attr, 'property') or
not hasattr(attr.property, 'columns') or
not attr.property.columns):
raise Exception('Invalid field %s: does not contain any columns.' % field)
return attr.property.columns
def _need_join(self, table):
return table not in self.model._sa_class_manager.mapper.tables
def init_search(self):
"""
Initialize search. Returns `True` if search is supported for this
view.
For SQLAlchemy, this will initialize internal fields: list of
column objects used for filtering, etc.
"""
if self.column_searchable_list:
self._search_fields = []
self._search_joins = dict()
for p in self.column_searchable_list:
for column in self._get_columns_for_field(p):
column_type = type(column.type).__name__
if not self.is_text_column_type(column_type):
raise Exception('Can only search on text columns. ' +
'Failed to setup search for "%s"' % p)
self._search_fields.append(column)
# If it belongs to different table - add a join
if self._need_join(column.table):
self._search_joins[column.table.name] = column.table
return bool(self.column_searchable_list)
def is_text_column_type(self, name):
"""
Verify if the provided column type is text-based.
:returns:
``True`` for ``String``, ``Unicode``, ``Text``, ``UnicodeText``
"""
if name:
name = name.lower()
return name in ('string', 'unicode', 'text', 'unicodetext')
def scaffold_filters(self, name):
"""
Return list of enabled filters
"""
join_tables = []
if isinstance(name, string_types):
model = self.model
for attribute in name.split('.'):
value = getattr(model, attribute)
if (hasattr(value, 'property') and
hasattr(value.property, 'direction')):
model = value.property.mapper.class_
table = model.__table__
if self._need_join(table):
join_tables.append(table)
attr = value
else:
attr = name
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Figure out filters for related column
if hasattr(attr, 'property') and hasattr(attr.property, 'direction'):
filters = []
for p in self._get_model_iterator(attr.property.mapper.class_):
if hasattr(p, 'columns'):
# TODO: Check for multiple columns
column = p.columns[0]
if column.foreign_keys or column.primary_key:
continue
visible_name = '%s / %s' % (self.get_column_name(attr.prop.table.name),
self.get_column_name(p.key))
type_name = type(column.type).__name__
flt = self.filter_converter.convert(type_name,
column,
visible_name)
if flt:
table = column.table
if join_tables:
self._filter_joins[table.name] = join_tables
elif self._need_join(table.name):
self._filter_joins[table.name] = [table.name]
filters.extend(flt)
return filters
else:
columns = self._get_columns_for_field(attr)
if len(columns) > 1:
raise Exception('Can not filter on more than one column for %s' % name)
column = columns[0]
if self._need_join(column.table) and name not in self.column_labels:
visible_name = '%s / %s' % (
self.get_column_name(column.table.name),
self.get_column_name(column.name)
)
else:
if not isinstance(name, string_types):
visible_name = self.get_column_name(name.property.key)
else:
visible_name = self.get_column_name(name)
type_name = type(column.type).__name__
if join_tables:
self._filter_joins[column.table.name] = join_tables
flt = self.filter_converter.convert(
type_name,
column,
visible_name,
options=self.column_choices.get(name),
)
if flt and not join_tables and self._need_join(column.table):
self._filter_joins[column.table.name] = [column.table]
return flt
def is_valid_filter(self, filter):
"""
Verify that the provided filter object is derived from the
SQLAlchemy-compatible filter class.
:param filter:
Filter object to verify.
"""
return isinstance(filter, filters.BaseSQLAFilter)
def scaffold_form(self):
"""
Create form from the model.
"""
converter = self.model_form_converter(self.session, self)
form_class = form.get_form(self.model, converter,
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
if self.inline_models:
form_class = self.scaffold_inline_form_models(form_class)
return form_class
def scaffold_inline_form_models(self, form_class):
"""
Contribute inline models to the form
:param form_class:
Form class
"""
inline_converter = self.inline_model_form_converter(self.session,
self,
self.model_form_converter)
for m in self.inline_models:
form_class = inline_converter.contribute(self.model, form_class, m)
return form_class
def scaffold_auto_joins(self):
"""
Return a list of joined tables by going through the
displayed columns.
"""
if not self.column_auto_select_related:
return []
relations = set()
for p in self._get_model_iterator():
if hasattr(p, 'direction'):
# Check if it is pointing to same model
if p.mapper.class_ == self.model:
continue
if p.direction.name in ['MANYTOONE', 'MANYTOMANY']:
relations.add(p.key)
joined = []
for prop, name in self._list_columns:
if prop in relations:
joined.append(getattr(self.model, prop))
return joined
# AJAX foreignkey support
def _create_ajax_loader(self, name, options):
return create_ajax_loader(self.model, self.session, name, name, options)
# Database-related API
def get_query(self):
"""
Return a query for the model type.
If you override this method, don't forget to override `get_count_query` as well.
"""
return self.session.query(self.model)
def get_count_query(self):
"""
Return a the count query for the model type
"""
return self.session.query(func.count('*')).select_from(self.model)
def _order_by(self, query, joins, sort_field, sort_desc):
"""
Apply order_by to the query
:param query:
Query
:param joins:
Joins set
:param sort_field:
Sort field
:param sort_desc:
Ascending or descending
"""
# TODO: Preprocessing for joins
# Try to handle it as a string
if isinstance(sort_field, string_types):
# Create automatic join against a table if column name
# contains dot.
if '.' in sort_field:
parts = sort_field.split('.', 1)
if parts[0] not in joins:
query = query.join(parts[0])
joins.add(parts[0])
elif isinstance(sort_field, InstrumentedAttribute):
# SQLAlchemy 0.8+ uses 'parent' as a name
mapper = getattr(sort_field, 'parent', None)
if mapper is None:
# SQLAlchemy 0.7.x uses parententity
mapper = getattr(sort_field, 'parententity', None)
if mapper is not None:
table = mapper.tables[0]
if self._need_join(table) and table.name not in joins:
query = query.outerjoin(table)
joins.add(table.name)
elif isinstance(sort_field, Column):
pass
else:
raise TypeError('Wrong argument type')
if sort_field is not None:
if sort_desc:
query = query.order_by(desc(sort_field))
else:
query = query.order_by(sort_field)
return query, joins
def _get_default_order(self):
order = super(ModelView, self)._get_default_order()
if order is not None:
field, direction = order
if isinstance(field, string_types):
field = getattr(self.model, field)
return field, direction
return None
def get_list(self, page, sort_column, sort_desc, search, filters, execute=True):
"""
Return models from the database.
:param page:
Page number
:param sort_column:
Sort column name
:param sort_desc:
Descending or ascending sort
:param search:
Search query
:param execute:
Execute query immediately? Default is `True`
:param filters:
List of filter tuples
"""
# Will contain names of joined tables to avoid duplicate joins
joins = set()
query = self.get_query()
count_query = self.get_count_query()
# Apply search criteria
if self._search_supported and search:
# Apply search-related joins
if self._search_joins:
for jn in self._search_joins.values():
query = query.join(jn)
count_query = count_query.join(jn)
joins = set(self._search_joins.keys())
# Apply terms
terms = search.split(' ')
for term in terms:
if not term:
continue
stmt = tools.parse_like_term(term)
filter_stmt = [c.ilike(stmt) for c in self._search_fields]
query = query.filter(or_(*filter_stmt))
count_query = count_query.filter(or_(*filter_stmt))
# Apply filters
if filters and self._filters:
for idx, value in filters:
flt = self._filters[idx]
# Figure out joins
tbl = flt.column.table.name
join_tables = self._filter_joins.get(tbl, [])
for table in join_tables:
if table.name not in joins:
query = query.join(table)
count_query = count_query.join(table)
joins.add(table.name)
# Apply filter
query = flt.apply(query, value)
count_query = flt.apply(count_query, value)
# Calculate number of rows
count = count_query.scalar()
# Auto join
for j in self._auto_joins:
query = query.options(joinedload(j))
# Sorting
if sort_column is not None:
if sort_column in self._sortable_columns:
sort_field = self._sortable_columns[sort_column]
query, joins = self._order_by(query, joins, sort_field, sort_desc)
else:
order = self._get_default_order()
if order:
query, joins = self._order_by(query, joins, order[0], order[1])
# Pagination
if page is not None:
query = query.offset(page * self.page_size)
query = query.limit(self.page_size)
# Execute if needed
if execute:
query = query.all()
return count, query
def get_one(self, id):
"""
Return a single model by its id.
:param id:
Model id
"""
return self.session.query(self.model).get(id)
# Model handlers
def create_model(self, form):
"""
Create model from form.
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self.session.add(model)
self._on_model_change(form, model, True)
self.session.commit()
except Exception as ex:
if self._debug:
raise
flash(gettext('Failed to create model. %(error)s', error=str(ex)), 'error')
log.exception('Failed to create model')
self.session.rollback()
return False
else:
self.after_model_change(form, model, True)
return True
def update_model(self, form, model):
"""
Update model from form.
:param form:
Form instance
:param model:
Model instance
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
self.session.commit()
except Exception as ex:
if self._debug:
raise
flash(gettext('Failed to update model. %(error)s', error=str(ex)), 'error')
log.exception('Failed to update model')
self.session.rollback()
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model.
:param model:
Model to delete
"""
try:
self.on_model_delete(model)
self.session.flush()
self.session.delete(model)
self.session.commit()
return True
except Exception as ex:
if self._debug:
raise
flash(gettext('Failed to delete model. %(error)s', error=str(ex)), 'error')
log.exception('Failed to delete model')
self.session.rollback()
return False
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected models?'))
def action_delete(self, ids):
try:
query = get_query_for_ids(self.get_query(), self.model, ids)
if self.fast_mass_delete:
count = query.delete(synchronize_session=False)
else:
count = 0
for m in query.all():
self.session.delete(m)
count += 1
self.session.commit()
flash(ngettext('Model was successfully deleted.',
'%(count)s models were successfully deleted.',
count,
count=count))
except Exception as ex:
if self._debug:
raise
flash(gettext('Failed to delete models. %(error)s', error=str(ex)), 'error')
| [
"import logging\n",
"\n",
"from sqlalchemy.orm.attributes import InstrumentedAttribute\n",
"from sqlalchemy.orm import joinedload\n",
"from sqlalchemy.sql.expression import desc\n",
"from sqlalchemy import Column, Boolean, func, or_\n",
"\n",
"from flask import flash\n",
"\n",
"from flask.ext.admin._compat import string_types\n",
"from flask.ext.admin.babel import gettext, ngettext, lazy_gettext\n",
"from flask.ext.admin.model import BaseModelView\n",
"from flask.ext.admin.actions import action\n",
"from flask.ext.admin._backwards import ObsoleteAttr\n",
"\n",
"from flask.ext.admin.contrib.sqla import form, filters, tools\n",
"from .typefmt import DEFAULT_FORMATTERS\n",
"from .tools import is_inherited_primary_key, get_column_for_current_model, get_query_for_ids\n",
"from .ajax import create_ajax_loader\n",
"\n",
"\n",
"# Set up logger\n",
"log = logging.getLogger(\"flask-admin.sqla\")\n",
"\n",
"\n",
"class ModelView(BaseModelView):\n",
" \"\"\"\n",
" SQLAlchemy model view\n",
"\n",
" Usage sample::\n",
"\n",
" admin = Admin()\n",
" admin.add_view(ModelView(User, db.session))\n",
" \"\"\"\n",
"\n",
" column_auto_select_related = ObsoleteAttr('column_auto_select_related',\n",
" 'auto_select_related',\n",
" True)\n",
" \"\"\"\n",
" Enable automatic detection of displayed foreign keys in this view\n",
" and perform automatic joined loading for related models to improve\n",
" query performance.\n",
"\n",
" Please note that detection is not recursive: if `__unicode__` method\n",
" of related model uses another model to generate string representation, it\n",
" will still make separate database call.\n",
" \"\"\"\n",
"\n",
" column_select_related_list = ObsoleteAttr('column_select_related',\n",
" 'list_select_related',\n",
" None)\n",
" \"\"\"\n",
" List of parameters for SQLAlchemy `subqueryload`. Overrides `column_auto_select_related`\n",
" property.\n",
"\n",
" For example::\n",
"\n",
" class PostAdmin(ModelView):\n",
" column_select_related_list = ('user', 'city')\n",
"\n",
" You can also use properties::\n",
"\n",
" class PostAdmin(ModelView):\n",
" column_select_related_list = (Post.user, Post.city)\n",
"\n",
" Please refer to the `subqueryload` on list of possible values.\n",
" \"\"\"\n",
"\n",
" column_display_all_relations = ObsoleteAttr('column_display_all_relations',\n",
" 'list_display_all_relations',\n",
" False)\n",
" \"\"\"\n",
" Controls if list view should display all relations, not only many-to-one.\n",
" \"\"\"\n",
"\n",
" column_searchable_list = ObsoleteAttr('column_searchable_list',\n",
" 'searchable_columns',\n",
" None)\n",
" \"\"\"\n",
" Collection of the searchable columns. Only text-based columns\n",
" are searchable (`String`, `Unicode`, `Text`, `UnicodeText`).\n",
"\n",
" Example::\n",
"\n",
" class MyModelView(ModelView):\n",
" column_searchable_list = ('name', 'email')\n",
"\n",
" You can also pass columns::\n",
"\n",
" class MyModelView(ModelView):\n",
" column_searchable_list = (User.name, User.email)\n",
"\n",
" The following search rules apply:\n",
"\n",
" - If you enter *ZZZ* in the UI search field, it will generate *ILIKE '%ZZZ%'*\n",
" statement against searchable columns.\n",
"\n",
" - If you enter multiple words, each word will be searched separately, but\n",
" only rows that contain all words will be displayed. For example, searching\n",
" for 'abc def' will find all rows that contain 'abc' and 'def' in one or\n",
" more columns.\n",
"\n",
" - If you prefix your search term with ^, it will find all rows\n",
" that start with ^. So, if you entered *^ZZZ*, *ILIKE 'ZZZ%'* will be used.\n",
"\n",
" - If you prefix your search term with =, it will perform an exact match.\n",
" For example, if you entered *=ZZZ*, the statement *ILIKE 'ZZZ'* will be used.\n",
" \"\"\"\n",
"\n",
" column_filters = None\n",
" \"\"\"\n",
" Collection of the column filters.\n",
"\n",
" Can contain either field names or instances of :class:`flask.ext.admin.contrib.sqla.filters.BaseFilter` classes.\n",
"\n",
" For example::\n",
"\n",
" class MyModelView(BaseModelView):\n",
" column_filters = ('user', 'email')\n",
"\n",
" or::\n",
"\n",
" class MyModelView(BaseModelView):\n",
" column_filters = (BooleanEqualFilter(User.name, 'Name'))\n",
" \"\"\"\n",
"\n",
" model_form_converter = form.AdminModelConverter\n",
" \"\"\"\n",
" Model form conversion class. Use this to implement custom field conversion logic.\n",
"\n",
" For example::\n",
"\n",
" class MyModelConverter(AdminModelConverter):\n",
" pass\n",
"\n",
"\n",
" class MyAdminView(ModelView):\n",
" model_form_converter = MyModelConverter\n",
" \"\"\"\n",
"\n",
" inline_model_form_converter = form.InlineModelConverter\n",
" \"\"\"\n",
" Inline model conversion class. If you need some kind of post-processing for inline\n",
" forms, you can customize behavior by doing something like this::\n",
"\n",
" class MyInlineModelConverter(AdminModelConverter):\n",
" def post_process(self, form_class, info):\n",
" form_class.value = wtf.TextField('value')\n",
" return form_class\n",
"\n",
" class MyAdminView(ModelView):\n",
" inline_model_form_converter = MyInlineModelConverter\n",
" \"\"\"\n",
"\n",
" filter_converter = filters.FilterConverter()\n",
" \"\"\"\n",
" Field to filter converter.\n",
"\n",
" Override this attribute to use non-default converter.\n",
" \"\"\"\n",
"\n",
" fast_mass_delete = False\n",
" \"\"\"\n",
" If set to `False` and user deletes more than one model using built in action,\n",
" all models will be read from the database and then deleted one by one\n",
" giving SQLAlchemy a chance to manually cleanup any dependencies (many-to-many\n",
" relationships, etc).\n",
"\n",
" If set to `True`, will run a `DELETE` statement which is somewhat faster,\n",
" but may leave corrupted data if you forget to configure `DELETE\n",
" CASCADE` for your model.\n",
" \"\"\"\n",
"\n",
" inline_models = None\n",
" \"\"\"\n",
" Inline related-model editing for models with parent-child relations.\n",
"\n",
" Accepts enumerable with one of the following possible values:\n",
"\n",
" 1. Child model class::\n",
"\n",
" class MyModelView(ModelView):\n",
" inline_models = (Post,)\n",
"\n",
" 2. Child model class and additional options::\n",
"\n",
" class MyModelView(ModelView):\n",
" inline_models = [(Post, dict(form_columns=['title']))]\n",
"\n",
" 3. Django-like ``InlineFormAdmin`` class instance::\n",
"\n",
" class MyInlineModelForm(InlineFormAdmin):\n",
" form_columns = ('title', 'date')\n",
"\n",
" class MyModelView(ModelView):\n",
" inline_models = (MyInlineModelForm(MyInlineModel),)\n",
"\n",
" You can customize the generated field name by:\n",
"\n",
" 1. Using the `form_name` property as a key to the options dictionary:\n",
"\n",
" class MyModelView(ModelView):\n",
" inline_models = ((Post, dict(form_label='Hello')))\n",
"\n",
" 2. Using forward relation name and `column_labels` property:\n",
"\n",
" class Model1(Base):\n",
" pass\n",
"\n",
" class Model2(Base):\n",
" # ...\n",
" model1 = relation(Model1, backref='models')\n",
"\n",
" class MyModel1View(Base):\n",
" inline_models = (Model2,)\n",
" column_labels = {'models': 'Hello'}\n",
" \"\"\"\n",
"\n",
" column_type_formatters = DEFAULT_FORMATTERS\n",
"\n",
" form_choices = None\n",
" \"\"\"\n",
" Map choices to form fields\n",
"\n",
" Example::\n",
"\n",
" class MyModelView(BaseModelView):\n",
" form_choices = {'my_form_field': [\n",
" ('db_value', 'display_value'),\n",
" ]\n",
" \"\"\"\n",
"\n",
" form_optional_types = (Boolean,)\n",
" \"\"\"\n",
" List of field types that should be optional if column is not nullable.\n",
"\n",
" Example::\n",
"\n",
" class MyModelView(BaseModelView):\n",
" form_optional_types = (Boolean, Unicode)\n",
" \"\"\"\n",
"\n",
" def __init__(self, model, session,\n",
" name=None, category=None, endpoint=None, url=None):\n",
" \"\"\"\n",
" Constructor.\n",
"\n",
" :param model:\n",
" Model class\n",
" :param session:\n",
" SQLAlchemy session\n",
" :param name:\n",
" View name. If not set, defaults to the model name\n",
" :param category:\n",
" Category name\n",
" :param endpoint:\n",
" Endpoint name. If not set, defaults to the model name\n",
" :param url:\n",
" Base URL. If not set, defaults to '/admin/' + endpoint\n",
" \"\"\"\n",
" self.session = session\n",
"\n",
" self._search_fields = None\n",
" self._search_joins = dict()\n",
"\n",
" self._filter_joins = dict()\n",
"\n",
" if self.form_choices is None:\n",
" self.form_choices = {}\n",
"\n",
" super(ModelView, self).__init__(model, name, category, endpoint, url)\n",
"\n",
" # Primary key\n",
" self._primary_key = self.scaffold_pk()\n",
"\n",
" if self._primary_key is None:\n",
" raise Exception('Model %s does not have primary key.' % self.model.__name__)\n",
"\n",
" # Configuration\n",
" if not self.column_select_related_list:\n",
" self._auto_joins = self.scaffold_auto_joins()\n",
" else:\n",
" self._auto_joins = self.column_select_related_list\n",
"\n",
" # Internal API\n",
" def _get_model_iterator(self, model=None):\n",
" \"\"\"\n",
" Return property iterator for the model\n",
" \"\"\"\n",
" if model is None:\n",
" model = self.model\n",
"\n",
" return model._sa_class_manager.mapper.iterate_properties\n",
"\n",
" # Scaffolding\n",
" def scaffold_pk(self):\n",
" \"\"\"\n",
" Return the primary key name from a model\n",
" PK can be a single value or a tuple if multiple PKs exist\n",
" \"\"\"\n",
" return tools.get_primary_key(self.model)\n",
"\n",
" def get_pk_value(self, model):\n",
" \"\"\"\n",
" Return the PK value from a model object.\n",
" PK can be a single value or a tuple if multiple PKs exist\n",
" \"\"\"\n",
" try:\n",
" return getattr(model, self._primary_key)\n",
" except TypeError:\n",
" v = []\n",
" for attr in self._primary_key:\n",
" v.append(getattr(model, attr))\n",
" return tuple(v)\n",
"\n",
" def scaffold_list_columns(self):\n",
" \"\"\"\n",
" Return a list of columns from the model.\n",
" \"\"\"\n",
" columns = []\n",
"\n",
" for p in self._get_model_iterator():\n",
" # Verify type\n",
" if hasattr(p, 'direction'):\n",
" if self.column_display_all_relations or p.direction.name == 'MANYTOONE':\n",
" columns.append(p.key)\n",
" elif hasattr(p, 'columns'):\n",
" column_inherited_primary_key = False\n",
"\n",
" if len(p.columns) != 1:\n",
" if is_inherited_primary_key(p):\n",
" column = get_column_for_current_model(p)\n",
" else:\n",
" raise TypeError('Can not convert multiple-column properties (%s.%s)' % (self.model, p.key))\n",
" else:\n",
" # Grab column\n",
" column = p.columns[0]\n",
"\n",
" # An inherited primary key has a foreign key as well\n",
" if column.foreign_keys and not is_inherited_primary_key(p):\n",
" continue\n",
"\n",
" if not self.column_display_pk and column.primary_key:\n",
" continue\n",
"\n",
" columns.append(p.key)\n",
"\n",
" return columns\n",
"\n",
" def scaffold_sortable_columns(self):\n",
" \"\"\"\n",
" Return a dictionary of sortable columns.\n",
" Key is column name, value is sort column/field.\n",
" \"\"\"\n",
" columns = dict()\n",
"\n",
" for p in self._get_model_iterator():\n",
" if hasattr(p, 'columns'):\n",
" # Sanity check\n",
" if len(p.columns) > 1:\n",
" # Multi-column properties are not supported\n",
" continue\n",
"\n",
" column = p.columns[0]\n",
"\n",
" # Can't sort on primary or foreign keys by default\n",
" if column.foreign_keys:\n",
" continue\n",
"\n",
" if not self.column_display_pk and column.primary_key:\n",
" continue\n",
"\n",
" columns[p.key] = column\n",
"\n",
" return columns\n",
"\n",
" def _get_columns_for_field(self, field):\n",
" if isinstance(field, string_types):\n",
" attr = getattr(self.model, field, None)\n",
"\n",
" if field is None:\n",
" raise Exception('Field %s was not found.' % field)\n",
" else:\n",
" attr = field\n",
"\n",
" if (not attr or\n",
" not hasattr(attr, 'property') or\n",
" not hasattr(attr.property, 'columns') or\n",
" not attr.property.columns):\n",
" raise Exception('Invalid field %s: does not contains any columns.' % field)\n",
"\n",
" return attr.property.columns\n",
"\n",
" def _need_join(self, table):\n",
" return table not in self.model._sa_class_manager.mapper.tables\n",
"\n",
" def init_search(self):\n",
" \"\"\"\n",
" Initialize search. Returns `True` if search is supported for this\n",
" view.\n",
"\n",
" For SQLAlchemy, this will initialize internal fields: list of\n",
" column objects used for filtering, etc.\n",
" \"\"\"\n",
" if self.column_searchable_list:\n",
" self._search_fields = []\n",
" self._search_joins = dict()\n",
"\n",
" for p in self.column_searchable_list:\n",
" for column in self._get_columns_for_field(p):\n",
" column_type = type(column.type).__name__\n",
"\n",
" if not self.is_text_column_type(column_type):\n",
" raise Exception('Can only search on text columns. ' +\n",
" 'Failed to setup search for \"%s\"' % p)\n",
"\n",
" self._search_fields.append(column)\n",
"\n",
" # If it belongs to different table - add a join\n",
" if self._need_join(column.table):\n",
" self._search_joins[column.table.name] = column.table\n",
"\n",
" return bool(self.column_searchable_list)\n",
"\n",
" def is_text_column_type(self, name):\n",
" \"\"\"\n",
" Verify if the provided column type is text-based.\n",
"\n",
" :returns:\n",
" ``True`` for ``String``, ``Unicode``, ``Text``, ``UnicodeText``\n",
" \"\"\"\n",
" if name:\n",
" name = name.lower()\n",
"\n",
" return name in ('string', 'unicode', 'text', 'unicodetext')\n",
"\n",
" def scaffold_filters(self, name):\n",
" \"\"\"\n",
" Return list of enabled filters\n",
" \"\"\"\n",
"\n",
" join_tables = []\n",
" if isinstance(name, string_types):\n",
" model = self.model\n",
"\n",
" for attribute in name.split('.'):\n",
" value = getattr(model, attribute)\n",
" if (hasattr(value, 'property') and\n",
" hasattr(value.property, 'direction')):\n",
" model = value.property.mapper.class_\n",
" table = model.__table__\n",
"\n",
" if self._need_join(table):\n",
" join_tables.append(table)\n",
"\n",
" attr = value\n",
" else:\n",
" attr = name\n",
"\n",
" if attr is None:\n",
" raise Exception('Failed to find field for filter: %s' % name)\n",
"\n",
" # Figure out filters for related column\n",
" if hasattr(attr, 'property') and hasattr(attr.property, 'direction'):\n",
" filters = []\n",
"\n",
" for p in self._get_model_iterator(attr.property.mapper.class_):\n",
" if hasattr(p, 'columns'):\n",
" # TODO: Check for multiple columns\n",
" column = p.columns[0]\n",
"\n",
" if column.foreign_keys or column.primary_key:\n",
" continue\n",
"\n",
" visible_name = '%s / %s' % (self.get_column_name(attr.prop.table.name),\n",
" self.get_column_name(p.key))\n",
"\n",
" type_name = type(column.type).__name__\n",
" flt = self.filter_converter.convert(type_name,\n",
" column,\n",
" visible_name)\n",
"\n",
" if flt:\n",
" table = column.table\n",
"\n",
" if join_tables:\n",
" self._filter_joins[table.name] = join_tables\n",
" elif self._need_join(table.name):\n",
" self._filter_joins[table.name] = [table.name]\n",
" filters.extend(flt)\n",
"\n",
" return filters\n",
" else:\n",
" columns = self._get_columns_for_field(attr)\n",
"\n",
" if len(columns) > 1:\n",
" raise Exception('Can not filter more than on one column for %s' % name)\n",
"\n",
" column = columns[0]\n",
"\n",
" if self._need_join(column.table) and name not in self.column_labels:\n",
" visible_name = '%s / %s' % (\n",
" self.get_column_name(column.table.name),\n",
" self.get_column_name(column.name)\n",
" )\n",
" else:\n",
" if not isinstance(name, string_types):\n",
" visible_name = self.get_column_name(name.property.key)\n",
" else:\n",
" visible_name = self.get_column_name(name)\n",
"\n",
" type_name = type(column.type).__name__\n",
"\n",
" if join_tables:\n",
" self._filter_joins[column.table.name] = join_tables\n",
"\n",
" flt = self.filter_converter.convert(\n",
" type_name,\n",
" column,\n",
" visible_name,\n",
" options=self.column_choices.get(name),\n",
" )\n",
"\n",
" if flt and not join_tables and self._need_join(column.table):\n",
" self._filter_joins[column.table.name] = [column.table]\n",
"\n",
" return flt\n",
"\n",
" def is_valid_filter(self, filter):\n",
" \"\"\"\n",
" Verify that the provided filter object is derived from the\n",
" SQLAlchemy-compatible filter class.\n",
"\n",
" :param filter:\n",
" Filter object to verify.\n",
" \"\"\"\n",
" return isinstance(filter, filters.BaseSQLAFilter)\n",
"\n",
" def scaffold_form(self):\n",
" \"\"\"\n",
" Create form from the model.\n",
" \"\"\"\n",
" converter = self.model_form_converter(self.session, self)\n",
" form_class = form.get_form(self.model, converter,\n",
" base_class=self.form_base_class,\n",
" only=self.form_columns,\n",
" exclude=self.form_excluded_columns,\n",
" field_args=self.form_args,\n",
" extra_fields=self.form_extra_fields)\n",
"\n",
" if self.inline_models:\n",
" form_class = self.scaffold_inline_form_models(form_class)\n",
"\n",
" return form_class\n",
"\n",
" def scaffold_inline_form_models(self, form_class):\n",
" \"\"\"\n",
" Contribute inline models to the form\n",
"\n",
" :param form_class:\n",
" Form class\n",
" \"\"\"\n",
" inline_converter = self.inline_model_form_converter(self.session,\n",
" self,\n",
" self.model_form_converter)\n",
"\n",
" for m in self.inline_models:\n",
" form_class = inline_converter.contribute(self.model, form_class, m)\n",
"\n",
" return form_class\n",
"\n",
" def scaffold_auto_joins(self):\n",
" \"\"\"\n",
" Return a list of joined tables by going through the\n",
" displayed columns.\n",
" \"\"\"\n",
" if not self.column_auto_select_related:\n",
" return []\n",
"\n",
" relations = set()\n",
"\n",
" for p in self._get_model_iterator():\n",
" if hasattr(p, 'direction'):\n",
" # Check if it is pointing to same model\n",
" if p.mapper.class_ == self.model:\n",
" continue\n",
"\n",
" if p.direction.name in ['MANYTOONE', 'MANYTOMANY']:\n",
" relations.add(p.key)\n",
"\n",
" joined = []\n",
"\n",
" for prop, name in self._list_columns:\n",
" if prop in relations:\n",
" joined.append(getattr(self.model, prop))\n",
"\n",
" return joined\n",
"\n",
" # AJAX foreignkey support\n",
" def _create_ajax_loader(self, name, options):\n",
" return create_ajax_loader(self.model, self.session, name, name, options)\n",
"\n",
" # Database-related API\n",
" def get_query(self):\n",
" \"\"\"\n",
" Return a query for the model type.\n",
"\n",
" If you override this method, don't forget to override `get_count_query` as well.\n",
" \"\"\"\n",
" return self.session.query(self.model)\n",
"\n",
" def get_count_query(self):\n",
" \"\"\"\n",
" Return a the count query for the model type\n",
" \"\"\"\n",
" return self.session.query(func.count('*')).select_from(self.model)\n",
"\n",
" def _order_by(self, query, joins, sort_field, sort_desc):\n",
" \"\"\"\n",
" Apply order_by to the query\n",
"\n",
" :param query:\n",
" Query\n",
" :param joins:\n",
" Joins set\n",
" :param sort_field:\n",
" Sort field\n",
" :param sort_desc:\n",
" Ascending or descending\n",
" \"\"\"\n",
" # TODO: Preprocessing for joins\n",
" # Try to handle it as a string\n",
" if isinstance(sort_field, string_types):\n",
" # Create automatic join against a table if column name\n",
" # contains dot.\n",
" if '.' in sort_field:\n",
" parts = sort_field.split('.', 1)\n",
"\n",
" if parts[0] not in joins:\n",
" query = query.join(parts[0])\n",
" joins.add(parts[0])\n",
" elif isinstance(sort_field, InstrumentedAttribute):\n",
" # SQLAlchemy 0.8+ uses 'parent' as a name\n",
" mapper = getattr(sort_field, 'parent', None)\n",
" if mapper is None:\n",
" # SQLAlchemy 0.7.x uses parententity\n",
" mapper = getattr(sort_field, 'parententity', None)\n",
"\n",
" if mapper is not None:\n",
" table = mapper.tables[0]\n",
"\n",
" if self._need_join(table) and table.name not in joins:\n",
" query = query.outerjoin(table)\n",
" joins.add(table.name)\n",
" elif isinstance(sort_field, Column):\n",
" pass\n",
" else:\n",
" raise TypeError('Wrong argument type')\n",
"\n",
" if sort_field is not None:\n",
" if sort_desc:\n",
" query = query.order_by(desc(sort_field))\n",
" else:\n",
" query = query.order_by(sort_field)\n",
"\n",
" return query, joins\n",
"\n",
" def _get_default_order(self):\n",
" order = super(ModelView, self)._get_default_order()\n",
"\n",
" if order is not None:\n",
" field, direction = order\n",
"\n",
" if isinstance(field, string_types):\n",
" field = getattr(self.model, field)\n",
"\n",
" return field, direction\n",
"\n",
" return None\n",
"\n",
" def get_list(self, page, sort_column, sort_desc, search, filters, execute=True):\n",
" \"\"\"\n",
" Return models from the database.\n",
"\n",
" :param page:\n",
" Page number\n",
" :param sort_column:\n",
" Sort column name\n",
" :param sort_desc:\n",
" Descending or ascending sort\n",
" :param search:\n",
" Search query\n",
" :param execute:\n",
" Execute query immediately? Default is `True`\n",
" :param filters:\n",
" List of filter tuples\n",
" \"\"\"\n",
"\n",
" # Will contain names of joined tables to avoid duplicate joins\n",
" joins = set()\n",
"\n",
" query = self.get_query()\n",
" count_query = self.get_count_query()\n",
"\n",
" # Apply search criteria\n",
" if self._search_supported and search:\n",
" # Apply search-related joins\n",
" if self._search_joins:\n",
" for jn in self._search_joins.values():\n",
" query = query.join(jn)\n",
" count_query = count_query.join(jn)\n",
"\n",
" joins = set(self._search_joins.keys())\n",
"\n",
" # Apply terms\n",
" terms = search.split(' ')\n",
"\n",
" for term in terms:\n",
" if not term:\n",
" continue\n",
"\n",
" stmt = tools.parse_like_term(term)\n",
" filter_stmt = [c.ilike(stmt) for c in self._search_fields]\n",
" query = query.filter(or_(*filter_stmt))\n",
" count_query = count_query.filter(or_(*filter_stmt))\n",
"\n",
" # Apply filters\n",
" if filters and self._filters:\n",
" for idx, value in filters:\n",
" flt = self._filters[idx]\n",
"\n",
" # Figure out joins\n",
" tbl = flt.column.table.name\n",
"\n",
" join_tables = self._filter_joins.get(tbl, [])\n",
"\n",
" for table in join_tables:\n",
" if table.name not in joins:\n",
" query = query.join(table)\n",
" count_query = count_query.join(table)\n",
" joins.add(table.name)\n",
"\n",
" # Apply filter\n",
" query = flt.apply(query, value)\n",
" count_query = flt.apply(count_query, value)\n",
"\n",
" # Calculate number of rows\n",
" count = count_query.scalar()\n",
"\n",
" # Auto join\n",
" for j in self._auto_joins:\n",
" query = query.options(joinedload(j))\n",
"\n",
" # Sorting\n",
" if sort_column is not None:\n",
" if sort_column in self._sortable_columns:\n",
" sort_field = self._sortable_columns[sort_column]\n",
"\n",
" query, joins = self._order_by(query, joins, sort_field, sort_desc)\n",
" else:\n",
" order = self._get_default_order()\n",
"\n",
" if order:\n",
" query, joins = self._order_by(query, joins, order[0], order[1])\n",
"\n",
" # Pagination\n",
" if page is not None:\n",
" query = query.offset(page * self.page_size)\n",
"\n",
" query = query.limit(self.page_size)\n",
"\n",
" # Execute if needed\n",
" if execute:\n",
" query = query.all()\n",
"\n",
" return count, query\n",
"\n",
" def get_one(self, id):\n",
" \"\"\"\n",
" Return a single model by its id.\n",
"\n",
" :param id:\n",
" Model id\n",
" \"\"\"\n",
" return self.session.query(self.model).get(id)\n",
"\n",
" # Model handlers\n",
" def create_model(self, form):\n",
" \"\"\"\n",
" Create model from form.\n",
"\n",
" :param form:\n",
" Form instance\n",
" \"\"\"\n",
" try:\n",
" model = self.model()\n",
" form.populate_obj(model)\n",
" self.session.add(model)\n",
" self._on_model_change(form, model, True)\n",
" self.session.commit()\n",
" except Exception as ex:\n",
" if self._debug:\n",
" raise\n",
"\n",
" flash(gettext('Failed to create model. %(error)s', error=str(ex)), 'error')\n",
" log.exception('Failed to create model')\n",
" self.session.rollback()\n",
" return False\n",
" else:\n",
" self.after_model_change(form, model, True)\n",
"\n",
" return True\n",
"\n",
" def update_model(self, form, model):\n",
" \"\"\"\n",
" Update model from form.\n",
"\n",
" :param form:\n",
" Form instance\n",
" :param model:\n",
" Model instance\n",
" \"\"\"\n",
" try:\n",
" form.populate_obj(model)\n",
" self._on_model_change(form, model, False)\n",
" self.session.commit()\n",
" except Exception as ex:\n",
" if self._debug:\n",
" raise\n",
"\n",
" flash(gettext('Failed to update model. %(error)s', error=str(ex)), 'error')\n",
" log.exception('Failed to update model')\n",
" self.session.rollback()\n",
"\n",
" return False\n",
" else:\n",
" self.after_model_change(form, model, False)\n",
"\n",
" return True\n",
"\n",
" def delete_model(self, model):\n",
" \"\"\"\n",
" Delete model.\n",
"\n",
" :param model:\n",
" Model to delete\n",
" \"\"\"\n",
" try:\n",
" self.on_model_delete(model)\n",
" self.session.flush()\n",
" self.session.delete(model)\n",
" self.session.commit()\n",
" return True\n",
" except Exception as ex:\n",
" if self._debug:\n",
" raise\n",
"\n",
" flash(gettext('Failed to delete model. %(error)s', error=str(ex)), 'error')\n",
" log.exception('Failed to delete model')\n",
" self.session.rollback()\n",
" return False\n",
"\n",
" # Default model actions\n",
" def is_action_allowed(self, name):\n",
" # Check delete action permission\n",
" if name == 'delete' and not self.can_delete:\n",
" return False\n",
"\n",
" return super(ModelView, self).is_action_allowed(name)\n",
"\n",
" @action('delete',\n",
" lazy_gettext('Delete'),\n",
" lazy_gettext('Are you sure you want to delete selected models?'))\n",
" def action_delete(self, ids):\n",
" try:\n",
"\n",
" query = get_query_for_ids(self.get_query(), self.model, ids)\n",
"\n",
" if self.fast_mass_delete:\n",
" count = query.delete(synchronize_session=False)\n",
" else:\n",
" count = 0\n",
"\n",
" for m in query.all():\n",
" self.session.delete(m)\n",
" count += 1\n",
"\n",
" self.session.commit()\n",
"\n",
" flash(ngettext('Model was successfully deleted.',\n",
" '%(count)s models were successfully deleted.',\n",
" count,\n",
" count=count))\n",
" except Exception as ex:\n",
" if self._debug:\n",
" raise\n",
"\n",
" flash(gettext('Failed to delete models. %(error)s', error=str(ex)), 'error')\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.014705882352941176,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0.012195121951219513,
0.011764705882352941,
0.012195121951219513,
0,
0,
0,
0.011764705882352941,
0,
0.012345679012345678,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0.008264462809917356,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0.011627906976744186,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0.008620689655172414,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775
] | 898 | 0.000493 |
#!/usr/bin/env python
###
# Copyright 2015, EMBL-EBI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import csv
import os.path
def parse(tsv_path):
if not os.path.exists(tsv_path):
raise IOError("File %s does not exist" % tsv_path)
if os.stat(tsv_path).st_size == 0:
return TsvConfig([])
rows = []
with open(tsv_path, 'r') as tsv:
reader = csv.reader(utf_8_encoder(tsv), delimiter="\t")
try:
for row in reader:
rows.append(row)
except csv.Error as e:
raise ValueError('TSV Format Error: file %s, line %d: %s' % (tsv_path, reader.line_num, e))
return TsvConfig(rows)
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
class TsvConfig:
def __init__(self, rows):
self.rows = []
self.header = None
if (rows is None) or len(rows) == 0:
return
header = rows[0]
self.rows = [self.as_dict(x, header) for x in rows[1:]]
def all_rows(self):
for r in self.rows:
yield r
def is_empty(self):
return len(self.rows) == 0
def row_size(self):
return len(self.rows)
def col_size(self):
return 0 if self.is_empty() else self.row_at(0).size()
def row_at(self, index):
return self.rows[index] if 0 <= index <= self.row_size() else None
def as_dict(self, values, headers):
d = dict()
for i, h in enumerate(headers, start=0):
d[h.lower()] = values[i]
return d
| [
"#!/usr/bin/env python\n",
"\n",
"###\n",
"# Copyright 2015, EMBL-EBI\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"###\n",
"\n",
"import csv\n",
"\n",
"import os.path\n",
"\n",
"\n",
"def parse(tsv_path):\n",
" if not os.path.exists(tsv_path):\n",
" raise IOError(\"File %s does not exist\" % tsv_path)\n",
"\n",
" if os.stat(tsv_path).st_size == 0:\n",
" return TsvConfig([])\n",
"\n",
" rows = []\n",
" with open(tsv_path, 'r') as tsv:\n",
" reader = csv.reader(utf_8_encoder(tsv), delimiter=\"\\t\")\n",
" try:\n",
" for row in reader:\n",
" rows.append(row)\n",
" except csv.Error as e:\n",
" raise ValueError('TSV Format Error: file %s, line %d: %s' % (tsv_path, reader.line_num, e))\n",
"\n",
" return TsvConfig(rows)\n",
"\n",
"\n",
"def utf_8_encoder(unicode_csv_data):\n",
" for line in unicode_csv_data:\n",
" yield line.encode('utf-8')\n",
"\n",
"\n",
"class TsvConfig:\n",
" def __init__(self, rows):\n",
" self.rows = []\n",
" self.header = None\n",
"\n",
" if (rows is None) or len(rows) == 0:\n",
" return\n",
"\n",
" header = rows[0]\n",
" self.rows = [self.as_dict(x, header) for x in rows[1:]]\n",
"\n",
" def all_rows(self):\n",
" for r in self.rows:\n",
" yield r\n",
"\n",
" def is_empty(self):\n",
" return len(self.rows) == 0\n",
"\n",
" def row_size(self):\n",
" return len(self.rows)\n",
"\n",
" def col_size(self):\n",
" return 0 if self.is_empty() else self.row_at(0).size()\n",
"\n",
" def row_at(self, index):\n",
" return self.rows[index] if 0 <= index <= self.row_size() else None\n",
"\n",
" def as_dict(self, values, headers):\n",
" d = dict()\n",
" for i, h in enumerate(headers, start=0):\n",
" d[h.lower()] = values[i]\n",
" return d\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 79 | 0.000122 |
# -*- coding: utf-8 -*-
# MD-Tracks is a trajectory analysis toolkit for molecular dynamics
# and monte carlo simulations.
# Copyright (C) 2007 - 2012 Toon Verstraelen <[email protected]>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of MD-Tracks.
#
# MD-Tracks is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "MD-TRACKS: A productive solution for the advanced analysis of Molecular
# Dynamics and Monte Carlo simulations", Toon Verstraelen, Marc Van Houteghem,
# Veronique Van Speybroeck and Michel Waroquier, Journal of Chemical Information
# and Modeling, 48 (12), 2414-2424, 2008
# DOI:10.1021/ci800233y
#
# MD-Tracks is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
def add_quiet_option(parser):
parser.add_option(
"-q", "--quiet", action="store_false", dest="verbose", default=True,
help="Don't print any output."
)
def add_slice_option(parser):
parser.add_option(
"-s", "--slice", default="::",
help="Subsample the (time dependent) input tracks with the given slice "
"start:stop:step where start, stop and step must be integers or "
"can be omitted. The slice interpretation is pythonic. "
"[default=%default]",
)
def add_append_option(parser):
parser.add_option(
"--append", action="store_false", dest="clear", default=True,
help="Append to existing tracks if possible."
)
def add_cell_option(parser):
parser.add_option(
"-c", "--cell", dest="unit_cell_str", default=None,
help="Take into account the given periodic boundary conditions. "
"The unit cell parameters. Several formats are supported. "
"(i) 'a,' A cubic unit cell with ridge a. "
"(ii) 'a,b,c' The parameters of an orthorhombic cell. "
"(iii) 'a,b,c,alpha,beta,gamma' The parameters for a triclinic cell. "
"(iv) 'ax,ay,az,bx,by,bz,cx,cy,cz' The cartesian parameters for a triclinic cell. "
"(v) 'cell_prefix' A track prefix can be used for a time dependent unit cell. "
"The presence of comma's is used to differentiate between all "
"the possibilities."
)
def add_cos_option(parser):
parser.add_option(
"--cos", action="store_true", default=False,
help="Compute the cosine instead of the angle."
)
def add_filter_atoms_option(parser):
parser.add_option(
"-a", "--filter-atoms",
help="Only consider the atoms listed in FILTER_ATOMS. FILTER_ATOMS is a "
"comma separated list of of integers. Counting starts at zero.",
)
def add_ic_project_option(parser, name):
parser.add_option(
"-p", "--project", action="store_true", default=False,
help="Project the cartesian velocity vector on the tangents of the internal"
"coordinate. (in this case %s)" % name,
)
def add_select_options(parser):
parser.add_option(
"-p", "--prefix", help="Format the indexes with the given prefix. The "
"output will look like 'PREFIX.0000000 PREFIX.0000001 ...'"
)
parser.add_option(
"--xyz", action='store_true', default=False,
help="Append x, y and z extension to the prefixes. (Only applicable when "
"-p is used. The output will look like 'PREFIX.0000000.x prefix.0000000.y "
"PREFIX.0000000.z PREFIX.0000001.x ...'"
)
def add_cor_time_unit(parser):
parser.add_option(
"-t", "--time-unit", default='au',
help="The correlation time is printed in the given TIME_UNIT. [default=%default]",
)
def add_blocks_option(parser):
parser.add_option(
"-b", "--blocks", default="1",
help="The input data is divided in BLOCKS and the final "
"power spectrum is the average over all the power spectra of the distinct "
"blocks. This reduces the noise in the amplitude vector, but it also "
"reduces the resolution on the frequency/wavenumber axis. "
"[default=%default]"
)
def add_pca_options(parser, default_unit):
parser.add_option(
"-c", "--corr-coeff", action="store_true", default=False,
help="Perform pca on the correlation coefficient matrix instead of the "
"covariance matrix.",
)
parser.add_option(
"-p", "--dump-pcs", action="store_true", default=False,
help="Dump the principal components in the tracks database",
)
parser.add_option(
"-n", "--num-levels", default=2, type="int",
help="The number of levels of block sizes to run the pca. "
"[default=%default]. At least 2."
)
parser.add_option(
"-u", "--unit", default=default_unit,
help="The unit in which the sigma's are printed on screen. [default=%default]",
)
def add_zero_mean_option(parser):
parser.add_option(
"-z", "--zero-mean", action="store_true", default=False,
help="Do not substract the mean from the input tracks prior to pca.",
)
| [
"# -*- coding: utf-8 -*-\n",
"# MD-Tracks is a trajectory analysis toolkit for molecular dynamics\n",
"# and monte carlo simulations.\n",
"# Copyright (C) 2007 - 2012 Toon Verstraelen <[email protected]>, Center\n",
"# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights\n",
"# reserved unless otherwise stated.\n",
"#\n",
"# This file is part of MD-Tracks.\n",
"#\n",
"# MD-Tracks is free software; you can redistribute it and/or\n",
"# modify it under the terms of the GNU General Public License\n",
"# as published by the Free Software Foundation; either version 3\n",
"# of the License, or (at your option) any later version.\n",
"#\n",
"# In addition to the regulations of the GNU General Public License,\n",
"# publications and communications based in parts on this program or on\n",
"# parts of this program are required to cite the following article:\n",
"#\n",
"# \"MD-TRACKS: A productive solution for the advanced analysis of Molecular\n",
"# Dynamics and Monte Carlo simulations\", Toon Verstraelen, Marc Van Houteghem,\n",
"# Veronique Van Speybroeck and Michel Waroquier, Journal of Chemical Information\n",
"# and Modeling, 48 (12), 2414-2424, 2008\n",
"# DOI:10.1021/ci800233y\n",
"#\n",
"# MD-Tracks is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with this program; if not, see <http://www.gnu.org/licenses/>\n",
"#\n",
"#--\n",
"\n",
"\n",
"\n",
"def add_quiet_option(parser):\n",
" parser.add_option(\n",
" \"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True,\n",
" help=\"Don't print any output.\"\n",
" )\n",
"\n",
"def add_slice_option(parser):\n",
" parser.add_option(\n",
" \"-s\", \"--slice\", default=\"::\",\n",
" help=\"Subsample the (time dependent) input tracks with the given slice \"\n",
" \"start:stop:step where start, stop and step must be integers or \"\n",
" \"can be omitted. The slice interpretation is pythonic. \"\n",
" \"[default=%default]\",\n",
" )\n",
"\n",
"def add_append_option(parser):\n",
" parser.add_option(\n",
" \"--append\", action=\"store_false\", dest=\"clear\", default=True,\n",
" help=\"Append to existing tracks if possible.\"\n",
" )\n",
"\n",
"def add_cell_option(parser):\n",
" parser.add_option(\n",
" \"-c\", \"--cell\", dest=\"unit_cell_str\", default=None,\n",
" help=\"Take into account the given periodic boundary conditions. \"\n",
" \"The unit cell parameters. Several formats are supported. \"\n",
" \"(i) 'a,' A cubic unit cell with ridge a. \"\n",
" \"(ii) 'a,b,c' The parameters of an orthorhombic cell. \"\n",
" \"(iii) 'a,b,c,alpha,beta,gamma' The parameters for a triclinic cell. \"\n",
" \"(iv) 'ax,ay,az,bx,by,bz,cx,cy,cz' The cartesian parameters for a triclinic cell. \"\n",
" \"(v) 'cell_prefix' A track prefix can be used for a time dependent unit cell. \"\n",
" \"The presence of comma's is used to differentiate between all \"\n",
" \"the possibilities.\"\n",
" )\n",
"\n",
"def add_cos_option(parser):\n",
" parser.add_option(\n",
" \"--cos\", action=\"store_true\", default=False,\n",
" help=\"Compute the cosine instead of the angle.\"\n",
" )\n",
"\n",
"def add_filter_atoms_option(parser):\n",
" parser.add_option(\n",
" \"-a\", \"--filter-atoms\",\n",
" help=\"Only consider the atoms listed in FILTER_ATOMS. FILTER_ATOMS is a \"\n",
" \"comma separated list of of integers. Counting starts at zero.\",\n",
" )\n",
"\n",
"def add_ic_project_option(parser, name):\n",
" parser.add_option(\n",
" \"-p\", \"--project\", action=\"store_true\", default=False,\n",
" help=\"Project the cartesian velocity vector on the tangents of the internal\"\n",
" \"coordinate. (in this case %s)\" % name,\n",
" )\n",
"\n",
"def add_select_options(parser):\n",
" parser.add_option(\n",
" \"-p\", \"--prefix\", help=\"Format the indexes with the given prefix. The \"\n",
" \"output will look like 'PREFIX.0000000 PREFIX.0000001 ...'\"\n",
" )\n",
" parser.add_option(\n",
" \"--xyz\", action='store_true', default=False,\n",
" help=\"Append x, y and z extension to the prefixes. (Only applicable when \"\n",
" \"-p is used. The output will look like 'PREFIX.0000000.x prefix.0000000.y \"\n",
" \"PREFIX.0000000.z PREFIX.0000001.x ...'\"\n",
" )\n",
"\n",
"def add_cor_time_unit(parser):\n",
" parser.add_option(\n",
" \"-t\", \"--time-unit\", default='au',\n",
" help=\"The correlation time is printed in the given TIME_UNIT. [default=%default]\",\n",
" )\n",
"\n",
"def add_blocks_option(parser):\n",
" parser.add_option(\n",
" \"-b\", \"--blocks\", default=\"1\",\n",
" help=\"The input data is divided in BLOCKS and the final \"\n",
" \"power spectrum is the average over all the power spectra of the distinct \"\n",
" \"blocks. This reduces the noise in the amplitude vector, but it also \"\n",
" \"reduces the resolution on the frequency/wavenumber axis. \"\n",
" \"[default=%default]\"\n",
" )\n",
"\n",
"def add_pca_options(parser, default_unit):\n",
" parser.add_option(\n",
" \"-c\", \"--corr-coeff\", action=\"store_true\", default=False,\n",
" help=\"Perform pca on the correlation coefficient matrix instead of the \"\n",
" \"covariance matrix.\",\n",
" )\n",
" parser.add_option(\n",
" \"-p\", \"--dump-pcs\", action=\"store_true\", default=False,\n",
" help=\"Dump the principal components in the tracks database\",\n",
" )\n",
" parser.add_option(\n",
" \"-n\", \"--num-levels\", default=2, type=\"int\",\n",
" help=\"The number of levels of block sizes to run the pca. \"\n",
" \"[default=%default]. At least 2.\"\n",
" )\n",
" parser.add_option(\n",
" \"-u\", \"--unit\", default=default_unit,\n",
" help=\"The unit in which the sigma's are printed on screen. [default=%default]\",\n",
" )\n",
"\n",
"def add_zero_mean_option(parser):\n",
" parser.add_option(\n",
" \"-z\", \"--zero-mean\", action=\"store_true\", default=False,\n",
" help=\"Do not substract the mean from the input tracks prior to pca.\",\n",
" )\n",
"\n",
"\n"
] | [
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.25,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.010309278350515464,
0.010752688172043012,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0.012195121951219513,
0,
0,
0,
0.024390243902439025,
0,
0,
0.011764705882352941,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.011904761904761904,
0,
0,
0,
0.03225806451612903,
0,
0,
0.01098901098901099,
0,
0,
0.03225806451612903,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
1
] | 146 | 0.012216 |
#!/usr/bin/env python
#importamos el modulo socket
import socket
#instanciamos un objeto para trabajar con el socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Con el metodo bind le indicamos que puerto debe escuchar y de que servidor esperar conexiones
#Es mejor dejarlo en blanco para recibir conexiones externas si es nuestro caso
s.bind(("", 9999))
#Aceptamos conexiones entrantes con el metodo listen, y ademas aplicamos como parametro
#El numero de conexiones entrantes que vamos a aceptar
s.listen(1)
#Instanciamos un objeto sc (socket cliente) para recibir datos, al recibir datos este
#devolvera tambien un objeto que representa una tupla con los datos de conexion: IP y puerto
sc, addr = s.accept()
while True:
#Recibimos el mensaje, con el metodo recv recibimos datos y como parametro
#la cantidad de bytes para recibir
recibido = sc.recv(1024)
#Si el mensaje recibido es la palabra close se cierra la aplicacion
if recibido == "close":
break
#Si se reciben datos nos muestra la IP y el mensaje recibido
print (str(addr[0]) + " dice: ", recibido)
#Devolvemos el mensaje al cliente
sc.send(recibido)
print ("Adios.")
#Cerramos la instancia del socket cliente y servidor
sc.close()
s.close()
| [
"#!/usr/bin/env python\n",
" \n",
"#importamos el modulo socket\n",
"import socket\n",
" \n",
"#instanciamos un objeto para trabajar con el socket\n",
"s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
" \n",
"#Con el metodo bind le indicamos que puerto debe escuchar y de que servidor esperar conexiones\n",
"#Es mejor dejarlo en blanco para recibir conexiones externas si es nuestro caso\n",
"s.bind((\"\", 9999))\n",
" \n",
"#Aceptamos conexiones entrantes con el metodo listen, y ademas aplicamos como parametro\n",
"#El numero de conexiones entrantes que vamos a aceptar\n",
"s.listen(1)\n",
" \n",
"#Instanciamos un objeto sc (socket cliente) para recibir datos, al recibir datos este \n",
"#devolvera tambien un objeto que representa una tupla con los datos de conexion: IP y puerto\n",
"sc, addr = s.accept()\n",
" \n",
" \n",
"while True:\n",
" \n",
" #Recibimos el mensaje, con el metodo recv recibimos datos y como parametro \n",
" #la cantidad de bytes para recibir\n",
" recibido = sc.recv(1024)\n",
" \n",
" #Si el mensaje recibido es la palabra close se cierra la aplicacion\n",
" if recibido == \"close\":\n",
" break\n",
" \n",
" #Si se reciben datos nos muestra la IP y el mensaje recibido\n",
" print (str(addr[0]) + \" dice: \", recibido)\n",
" \n",
" #Devolvemos el mensaje al cliente\n",
" sc.send(recibido)\n",
"print (\"Adios.\")\n",
" \n",
"#Cerramos la instancia del socket cliente y servidor\n",
"sc.close()\n",
"s.close()\n"
] | [
0,
0.5,
0.034482758620689655,
0,
0.5,
0.019230769230769232,
0,
0.5,
0.021052631578947368,
0.0125,
0,
0.5,
0.022727272727272728,
0.01818181818181818,
0,
0.5,
0.034482758620689655,
0.021505376344086023,
0,
0.5,
0.5,
0,
0.5,
0.025,
0.02564102564102564,
0,
0.5,
0.013888888888888888,
0,
0,
0.5,
0.015384615384615385,
0.02127659574468085,
0.5,
0.02631578947368421,
0,
0.058823529411764705,
0.5,
0.018867924528301886,
0,
0
] | 41 | 0.155838 |
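Each record in this dump repeats the same layout: org_text is the raw source file, texts is apparently the same file as a list of lines, scores carries one float per line, and the two trailing cells are num_lines and avg_score. As a minimal sketch only, assuming num_lines is simply len(texts) and avg_score is the plain mean of scores (this matches the row above: 41 lines, mean 0.155838), the summary cells can be re-derived as below; the record/summarize names are hypothetical and not part of the dataset.

from statistics import mean

def summarize(record):
    # Recompute the num_lines and avg_score cells from one record,
    # assuming one score per text line (as in the rows shown here).
    texts, scores = record["texts"], record["scores"]
    assert len(texts) == len(scores)
    return len(texts), mean(scores)

# e.g. for the socket-server row above this would give (41, 0.15583...)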
""" Defines the DataLabel class and related trait and function.
"""
# Major library imports
from numpy import array, asarray, inf
from numpy.linalg import norm
# Enthought library imports
from traits.api import Any, Array, Bool, Enum, Float, Int, List, \
Str, Tuple, Trait, on_trait_change, Property
from enable.api import ColorTrait, MarkerTrait
# Local, relative imports
from scatterplot import render_markers
from tooltip import ToolTip
# Specifies the position of a label relative to its target. This can
# be one of the text strings indicated, or a tuple or list of floats representing
# the (x_offset, y_offset) in screen space of the label's lower left corner.
LabelPositionTrait = Trait("top right",
Enum("bottom", "left", "right", "top",
"top right", "top left", "bottom left", "bottom right"),
Tuple, List)
def draw_arrow(gc, pt1, pt2, color, arrowhead_size=10.0, offset1=0,
offset2=0, arrow=None, minlen=0, maxlen=inf):
""" Renders an arrow from *pt1* to *pt2*. If gc is None, then just returns
the arrow object.
Parameters
==========
gc : graphics context
where to render the arrow
pt1 : point
the origin of the arrow
pt2 : point
where the arrow is pointing
color : a 3- or 4-tuple of color value
the color to use for the arrow stem and head
arrowhead_size : number
screen units corresponding to the length of the arrowhead
offset1 : number
the amount of space from the start of the arrow to pt1
offset2 : number
the amount of space from the tip of the arrow to pt2
arrow : object
an opaque object returned by previous calls to draw_arrow. If this
argument is provided, all other arguments (except gc) are ignored
minlen: number or None
the minimum length of the arrow; if the arrow is shorter than this,
it will not be drawn
maxlen: number or None
the maximum length of the arrow; if the arrow is longer than this, it
will not be drawn
Returns
=======
An 'arrow' (opaque object) which can be passed in to subsequent
calls to this method to short-circuit some of the computation.
Even if an arrow is not drawn (due to minlen/maxlen restrictions),
an arrow will be returned.
"""
if arrow is None:
pt1 = asarray(pt1)
pt2 = asarray(pt2)
unit_vec = (pt2-pt1)
unit_vec /= norm(unit_vec)
if unit_vec[0] == 0:
perp_vec = array((0.3 * arrowhead_size,0))
elif unit_vec[1] == 0:
perp_vec = array((0,0.3 * arrowhead_size))
else:
slope = unit_vec[1]/unit_vec[0]
perp_slope = -1/slope
perp_vec = array((1.0, perp_slope))
perp_vec *= 0.3 * arrowhead_size / norm(perp_vec)
pt1 = pt1 + offset1 * unit_vec
pt2 = pt2 - offset2 * unit_vec
arrowhead_l = pt2 - (arrowhead_size*unit_vec + perp_vec)
arrowhead_r = pt2 - (arrowhead_size*unit_vec - perp_vec)
arrow = (pt1, pt2, arrowhead_l, arrowhead_r)
else:
pt1, pt2, arrowhead_l, arrowhead_r = arrow
arrowlen = norm(pt2 - pt1)
if arrowlen < minlen or arrowlen > maxlen:
# This is the easiest way to circumvent the actual drawing
gc = None
if gc is not None:
gc.set_stroke_color(color)
gc.set_fill_color(color)
gc.begin_path()
gc.move_to(*pt1)
gc.line_to(*pt2)
gc.stroke_path()
gc.move_to(*pt2)
gc.line_to(*arrowhead_l)
gc.line_to(*arrowhead_r)
gc.fill_path()
return arrow
class DataLabel(ToolTip):
""" A label on a point in data space, optionally with an arrow to the point.
"""
# The symbol to use if **marker** is set to "custom". This attribute must
# be a compiled path for the given Kiva context.
custom_symbol = Any
# The point in data space where this label should anchor itself.
data_point = Trait(None, None, Tuple, List, Array)
# The location of the data label relative to the data point.
label_position = LabelPositionTrait
# The format string that determines the label's text. This string is
# formatted using a dict containing the keys 'x' and 'y', corresponding to
# data space values.
label_format = Str("(%(x)f, %(y)f)")
# The text to show on the label, or above the coordinates for the label, if
# show_label_coords is True
label_text = Str
# Flag whether to show coordinates with the label or not.
show_label_coords = Bool(True)
# Does the label clip itself against the main plot area? If not, then
# the label draws into the padding area (where axes typically reside).
clip_to_plot = Bool(True)
# The center x position (average of x and x2)
xmid = Property(Float, depends_on=['x', 'x2'])
# The center y position (average of y and y2)
ymid = Property(Float, depends_on=['y', 'y2'])
#----------------------------------------------------------------------
# Marker traits
#----------------------------------------------------------------------
# Mark the point on the data that this label refers to?
marker_visible = Bool(True)
# The type of marker to use. This is a mapped trait using strings as the
# keys.
marker = MarkerTrait
# The pixel size of the marker (doesn't include the thickness of the outline).
marker_size = Int(4)
# The thickness, in pixels, of the outline to draw around the marker. If
# this is 0, no outline will be drawn.
marker_line_width = Float(1.0)
# The color of the inside of the marker.
marker_color = ColorTrait("red")
# The color out of the border drawn around the marker.
marker_line_color = ColorTrait("black")
#----------------------------------------------------------------------
# Arrow traits
#----------------------------------------------------------------------
# Draw an arrow from the label to the data point? Only
# used if **data_point** is not None.
arrow_visible = Bool(True) # FIXME: replace with some sort of ArrowStyle
# The length of the arrowhead, in screen points (e.g., pixels).
arrow_size = Float(10)
# The color of the arrow.
arrow_color = ColorTrait("black")
# The position of the base of the arrow on the label. If this
# is 'auto', then the label uses **label_position**. Otherwise, it treats
# the label as if it were at the label position indicated by this attribute.
arrow_root = Trait("auto", "auto", "top left", "top right", "bottom left",
"bottom right", "top center", "bottom center",
"left center", "right center")
# The minimum length of the arrow before it will be drawn. By default,
# the arrow will be drawn regardless of how short it is.
arrow_min_length = Float(0)
# The maximum length of the arrow before it will be drawn. By default,
# the arrow will be drawn regardless of how long it is.
arrow_max_length = Float(inf)
#-------------------------------------------------------------------------
# Private traits
#-------------------------------------------------------------------------
# Tuple (sx, sy) of the mapped screen coordinates of **data_point**.
_screen_coords = Any
_cached_arrow = Any
# When **arrow_root** is 'auto', this determines the location on the data label
# from which the arrow is drawn, based on the position of the label relative
# to its data point.
_position_root_map = {
"top left": "bottom right",
"top right": "bottom left",
"bottom left": "top right",
"bottom right": "top left",
"top center": "bottom center",
"bottom center": "top center",
"left center": "right center",
"right center": "left center"
}
_root_positions = {
"bottom right": ("x2", "y"),
"bottom left": ("x", "y"),
"top right": ("x2", "y2"),
"top left": ("x", "y2"),
"top center": ("xmid", "y2"),
"bottom center": ("xmid", "y"),
"left center": ("x", "ymid"),
"right center": ("x2", "ymid"),
}
def overlay(self, component, gc, view_bounds=None, mode="normal"):
""" Draws the tooltip overlaid on another component.
Overrides and extends ToolTip.overlay()
"""
if self.clip_to_plot:
gc.save_state()
c = component
gc.clip_to_rect(c.x, c.y, c.width, c.height)
self.do_layout()
# draw the arrow if necessary
if self.arrow_visible:
if self._cached_arrow is None:
if self.arrow_root in self._root_positions:
ox, oy = self._root_positions[self.arrow_root]
else:
if self.arrow_root == "auto":
arrow_root = self.label_position
else:
arrow_root = self.arrow_root
ox, oy = self._root_positions.get(
self._position_root_map.get(arrow_root, "DUMMY"),
(self.x+self.width/2, self.y+self.height/2)
)
if type(ox) == str:
ox = getattr(self, ox)
oy = getattr(self, oy)
self._cached_arrow = draw_arrow(gc, (ox, oy), self._screen_coords,
self.arrow_color_,
arrowhead_size=self.arrow_size,
offset1=3,
offset2=self.marker_size+3,
minlen=self.arrow_min_length,
maxlen=self.arrow_max_length)
else:
draw_arrow(gc, None, None, self.arrow_color_,
arrow=self._cached_arrow,
minlen=self.arrow_min_length,
maxlen=self.arrow_max_length)
# layout and render the label itself
ToolTip.overlay(self, component, gc, view_bounds, mode)
# draw the marker
if self.marker_visible:
render_markers(gc, [self._screen_coords], self.marker, self.marker_size,
self.marker_color_, self.marker_line_width,
self.marker_line_color_, self.custom_symbol)
if self.clip_to_plot:
gc.restore_state()
def _do_layout(self, size=None):
"""Computes the size and position of the label and arrow.
Overrides and extends ToolTip._do_layout()
"""
if not self.component or not hasattr(self.component, "map_screen"):
return
# Call the parent class layout. This computes all the label
ToolTip._do_layout(self)
self._screen_coords = self.component.map_screen([self.data_point])[0]
sx, sy = self._screen_coords
if isinstance(self.label_position, str):
orientation = self.label_position
if ("left" in orientation) or ("right" in orientation):
if " " not in orientation:
self.y = sy - self.height / 2
if "left" in orientation:
self.outer_x = sx - self.outer_width - 1
elif "right" in orientation:
self.outer_x = sx
if ("top" in orientation) or ("bottom" in orientation):
if " " not in orientation:
self.x = sx - self.width / 2
if "bottom" in orientation:
self.outer_y = sy - self.outer_height - 1
elif "top" in orientation:
self.outer_y = sy
if "center" in orientation:
if " " not in orientation:
self.x = sx - (self.width/2)
self.y = sy - (self.height/2)
else:
self.x = sx - (self.outer_width/2) - 1
self.y = sy - (self.outer_height/2) - 1
else:
self.x = sx + self.label_position[0]
self.y = sy + self.label_position[1]
self._cached_arrow = None
return
def _data_point_changed(self, old, new):
if new is not None:
self._create_new_labels()
def _label_format_changed(self, old, new):
self._create_new_labels()
def _label_text_changed(self, old, new):
self._create_new_labels()
def _show_label_coords_changed(self, old, new):
self._create_new_labels()
def _create_new_labels(self):
pt = self.data_point
if pt is not None:
if self.show_label_coords:
self.lines = [self.label_text, self.label_format % {"x": pt[0], "y": pt[1]}]
else:
self.lines = [self.label_text]
def _component_changed(self, old, new):
for comp, attach in ((old, False), (new, True)):
if comp is not None:
if hasattr(comp, 'index_mapper'):
self._modify_mapper_listeners(comp.index_mapper, attach=attach)
if hasattr(comp, 'value_mapper'):
self._modify_mapper_listeners(comp.value_mapper, attach=attach)
return
def _modify_mapper_listeners(self, mapper, attach=True):
if mapper is not None:
mapper.on_trait_change(self._handle_mapper, 'updated', remove=not attach)
return
def _handle_mapper(self):
# This gets fired whenever a mapper on our plot fires its 'updated' event.
self._layout_needed = True
@on_trait_change("arrow_size,arrow_root,arrow_min_length,arrow_max_length")
def _invalidate_arrow(self):
self._cached_arrow = None
self._layout_needed = True
@on_trait_change("label_position,position,position_items,bounds,bounds_items")
def _invalidate_layout(self):
self._layout_needed = True
def _get_xmid(self):
return 0.5 * (self.x + self.x2)
def _get_ymid(self):
return 0.5 * (self.y + self.y2)
| [
"\"\"\" Defines the DataLabel class and related trait and function.\n",
"\"\"\"\n",
"# Major library imports\n",
"from numpy import array, asarray, inf\n",
"from numpy.linalg import norm\n",
"\n",
"# Enthought library imports\n",
"from traits.api import Any, Array, Bool, Enum, Float, Int, List, \\\n",
" Str, Tuple, Trait, on_trait_change, Property\n",
"from enable.api import ColorTrait, MarkerTrait\n",
"\n",
"# Local, relative imports\n",
"from scatterplot import render_markers\n",
"from tooltip import ToolTip\n",
"\n",
"\n",
"# Specifies the position of a label relative to its target. This can\n",
"# be one of the text strings indicated, or a tuple or list of floats representing\n",
"# the (x_offset, y_offset) in screen space of the label's lower left corner.\n",
"LabelPositionTrait = Trait(\"top right\",\n",
" Enum(\"bottom\", \"left\", \"right\", \"top\",\n",
" \"top right\", \"top left\", \"bottom left\", \"bottom right\"),\n",
" Tuple, List)\n",
"\n",
"\n",
"def draw_arrow(gc, pt1, pt2, color, arrowhead_size=10.0, offset1=0,\n",
" offset2=0, arrow=None, minlen=0, maxlen=inf):\n",
" \"\"\" Renders an arrow from *pt1* to *pt2*. If gc is None, then just returns\n",
" the arrow object.\n",
"\n",
" Parameters\n",
" ==========\n",
" gc : graphics context\n",
" where to render the arrow\n",
" pt1 : point\n",
" the origin of the arrow\n",
" pt2 : point\n",
" where the arrow is pointing\n",
" color : a 3- or 4-tuple of color value\n",
" the color to use for the arrow stem and head\n",
" arrowhead_size : number\n",
" screen units corresponding to the length of the arrowhead\n",
" offset1 : number\n",
" the amount of space from the start of the arrow to pt1\n",
" offset2 : number\n",
" the amount of space from the tip of the arrow to pt2\n",
" arrow : object\n",
" an opaque object returned by previous calls to draw_arrow. If this\n",
" argument is provided, all other arguments (except gc) are ignored\n",
" minlen: number or None\n",
" the minimum length of the arrow; if the arrow is shorter than this,\n",
" it will not be drawn\n",
" maxlen: number or None\n",
" the maximum length of the arrow; if the arrow is longer than this, it\n",
" will not be drawn\n",
"\n",
" Returns\n",
" =======\n",
" An 'arrow' (opaque object) which can be passed in to subsequent\n",
" calls to this method to short-circuit some of the computation.\n",
" Even if an arrow is not drawn (due to minlen/maxlen restrictions),\n",
" an arrow will be returned.\n",
" \"\"\"\n",
"\n",
" if arrow is None:\n",
" pt1 = asarray(pt1)\n",
" pt2 = asarray(pt2)\n",
"\n",
" unit_vec = (pt2-pt1)\n",
" unit_vec /= norm(unit_vec)\n",
"\n",
" if unit_vec[0] == 0:\n",
" perp_vec = array((0.3 * arrowhead_size,0))\n",
" elif unit_vec[1] == 0:\n",
" perp_vec = array((0,0.3 * arrowhead_size))\n",
" else:\n",
" slope = unit_vec[1]/unit_vec[0]\n",
" perp_slope = -1/slope\n",
" perp_vec = array((1.0, perp_slope))\n",
" perp_vec *= 0.3 * arrowhead_size / norm(perp_vec)\n",
"\n",
" pt1 = pt1 + offset1 * unit_vec\n",
" pt2 = pt2 - offset2 * unit_vec\n",
"\n",
" arrowhead_l = pt2 - (arrowhead_size*unit_vec + perp_vec)\n",
" arrowhead_r = pt2 - (arrowhead_size*unit_vec - perp_vec)\n",
" arrow = (pt1, pt2, arrowhead_l, arrowhead_r)\n",
" else:\n",
" pt1, pt2, arrowhead_l, arrowhead_r = arrow\n",
"\n",
" arrowlen = norm(pt2 - pt1)\n",
" if arrowlen < minlen or arrowlen > maxlen:\n",
" # This is the easiest way to circumvent the actual drawing\n",
" gc = None\n",
"\n",
" if gc is not None:\n",
" gc.set_stroke_color(color)\n",
" gc.set_fill_color(color)\n",
" gc.begin_path()\n",
" gc.move_to(*pt1)\n",
" gc.line_to(*pt2)\n",
" gc.stroke_path()\n",
" gc.move_to(*pt2)\n",
" gc.line_to(*arrowhead_l)\n",
" gc.line_to(*arrowhead_r)\n",
" gc.fill_path()\n",
" return arrow\n",
"\n",
"\n",
"class DataLabel(ToolTip):\n",
" \"\"\" A label on a point in data space, optionally with an arrow to the point.\n",
" \"\"\"\n",
"\n",
" # The symbol to use if **marker** is set to \"custom\". This attribute must\n",
" # be a compiled path for the given Kiva context.\n",
" custom_symbol = Any\n",
"\n",
" # The point in data space where this label should anchor itself.\n",
" data_point = Trait(None, None, Tuple, List, Array)\n",
"\n",
" # The location of the data label relative to the data point.\n",
" label_position = LabelPositionTrait\n",
"\n",
" # The format string that determines the label's text. This string is\n",
" # formatted using a dict containing the keys 'x' and 'y', corresponding to\n",
" # data space values.\n",
" label_format = Str(\"(%(x)f, %(y)f)\")\n",
"\n",
" # The text to show on the label, or above the coordinates for the label, if\n",
" # show_label_coords is True\n",
" label_text = Str\n",
"\n",
" # Flag whether to show coordinates with the label or not.\n",
" show_label_coords = Bool(True)\n",
"\n",
" # Does the label clip itself against the main plot area? If not, then\n",
" # the label draws into the padding area (where axes typically reside).\n",
" clip_to_plot = Bool(True)\n",
"\n",
" # The center x position (average of x and x2)\n",
" xmid = Property(Float, depends_on=['x', 'x2'])\n",
" \n",
" # The center y position (average of y and y2)\n",
" ymid = Property(Float, depends_on=['y', 'y2'])\n",
"\n",
" #----------------------------------------------------------------------\n",
" # Marker traits\n",
" #----------------------------------------------------------------------\n",
"\n",
" # Mark the point on the data that this label refers to?\n",
" marker_visible = Bool(True)\n",
"\n",
" # The type of marker to use. This is a mapped trait using strings as the\n",
" # keys.\n",
" marker = MarkerTrait\n",
"\n",
" # The pixel size of the marker (doesn't include the thickness of the outline).\n",
" marker_size = Int(4)\n",
"\n",
" # The thickness, in pixels, of the outline to draw around the marker. If\n",
" # this is 0, no outline will be drawn.\n",
" marker_line_width = Float(1.0)\n",
"\n",
" # The color of the inside of the marker.\n",
" marker_color = ColorTrait(\"red\")\n",
"\n",
" # The color out of the border drawn around the marker.\n",
" marker_line_color = ColorTrait(\"black\")\n",
"\n",
" #----------------------------------------------------------------------\n",
" # Arrow traits\n",
" #----------------------------------------------------------------------\n",
"\n",
" # Draw an arrow from the label to the data point? Only\n",
" # used if **data_point** is not None.\n",
" arrow_visible = Bool(True) # FIXME: replace with some sort of ArrowStyle\n",
"\n",
" # The length of the arrowhead, in screen points (e.g., pixels).\n",
" arrow_size = Float(10)\n",
"\n",
" # The color of the arrow.\n",
" arrow_color = ColorTrait(\"black\")\n",
"\n",
" # The position of the base of the arrow on the label. If this\n",
" # is 'auto', then the label uses **label_position**. Otherwise, it treats\n",
" # the label as if it were at the label position indicated by this attribute.\n",
" arrow_root = Trait(\"auto\", \"auto\", \"top left\", \"top right\", \"bottom left\",\n",
" \"bottom right\", \"top center\", \"bottom center\",\n",
" \"left center\", \"right center\")\n",
"\n",
" # The minimum length of the arrow before it will be drawn. By default,\n",
" # the arrow will be drawn regardless of how short it is.\n",
" arrow_min_length = Float(0)\n",
"\n",
" # The maximum length of the arrow before it will be drawn. By default,\n",
" # the arrow will be drawn regardless of how long it is.\n",
" arrow_max_length = Float(inf)\n",
"\n",
" #-------------------------------------------------------------------------\n",
" # Private traits\n",
" #-------------------------------------------------------------------------\n",
"\n",
" # Tuple (sx, sy) of the mapped screen coordinates of **data_point**.\n",
" _screen_coords = Any\n",
"\n",
" _cached_arrow = Any\n",
"\n",
" # When **arrow_root** is 'auto', this determines the location on the data label\n",
" # from which the arrow is drawn, based on the position of the label relative\n",
" # to its data point.\n",
" _position_root_map = {\n",
" \"top left\": \"bottom right\",\n",
" \"top right\": \"bottom left\",\n",
" \"bottom left\": \"top right\",\n",
" \"bottom right\": \"top left\",\n",
" \"top center\": \"bottom center\",\n",
" \"bottom center\": \"top center\",\n",
" \"left center\": \"right center\",\n",
" \"right center\": \"left center\"\n",
" }\n",
"\n",
" _root_positions = {\n",
" \"bottom right\": (\"x2\", \"y\"),\n",
" \"bottom left\": (\"x\", \"y\"),\n",
" \"top right\": (\"x2\", \"y2\"),\n",
" \"top left\": (\"x\", \"y2\"),\n",
" \"top center\": (\"xmid\", \"y2\"),\n",
" \"bottom center\": (\"xmid\", \"y\"),\n",
" \"left center\": (\"x\", \"ymid\"),\n",
" \"right center\": (\"x2\", \"ymid\"),\n",
" }\n",
"\n",
"\n",
" def overlay(self, component, gc, view_bounds=None, mode=\"normal\"):\n",
" \"\"\" Draws the tooltip overlaid on another component.\n",
"\n",
" Overrides and extends ToolTip.overlay()\n",
" \"\"\"\n",
" if self.clip_to_plot:\n",
" gc.save_state()\n",
" c = component\n",
" gc.clip_to_rect(c.x, c.y, c.width, c.height)\n",
"\n",
" self.do_layout()\n",
"\n",
" # draw the arrow if necessary\n",
" if self.arrow_visible:\n",
" if self._cached_arrow is None:\n",
" if self.arrow_root in self._root_positions:\n",
" ox, oy = self._root_positions[self.arrow_root]\n",
" else:\n",
" if self.arrow_root == \"auto\":\n",
" arrow_root = self.label_position\n",
" else:\n",
" arrow_root = self.arrow_root\n",
" ox, oy = self._root_positions.get(\n",
" self._position_root_map.get(arrow_root, \"DUMMY\"),\n",
" (self.x+self.width/2, self.y+self.height/2)\n",
" )\n",
"\n",
" if type(ox) == str:\n",
" ox = getattr(self, ox)\n",
" oy = getattr(self, oy)\n",
" self._cached_arrow = draw_arrow(gc, (ox, oy), self._screen_coords,\n",
" self.arrow_color_,\n",
" arrowhead_size=self.arrow_size,\n",
" offset1=3,\n",
" offset2=self.marker_size+3,\n",
" minlen=self.arrow_min_length,\n",
" maxlen=self.arrow_max_length)\n",
" else:\n",
" draw_arrow(gc, None, None, self.arrow_color_,\n",
" arrow=self._cached_arrow,\n",
" minlen=self.arrow_min_length,\n",
" maxlen=self.arrow_max_length)\n",
"\n",
" # layout and render the label itself\n",
" ToolTip.overlay(self, component, gc, view_bounds, mode)\n",
"\n",
" # draw the marker\n",
" if self.marker_visible:\n",
" render_markers(gc, [self._screen_coords], self.marker, self.marker_size,\n",
" self.marker_color_, self.marker_line_width,\n",
" self.marker_line_color_, self.custom_symbol)\n",
"\n",
" if self.clip_to_plot:\n",
" gc.restore_state()\n",
"\n",
" def _do_layout(self, size=None):\n",
" \"\"\"Computes the size and position of the label and arrow.\n",
"\n",
" Overrides and extends ToolTip._do_layout()\n",
" \"\"\"\n",
" if not self.component or not hasattr(self.component, \"map_screen\"):\n",
" return\n",
"\n",
" # Call the parent class layout. This computes all the label\n",
" ToolTip._do_layout(self)\n",
"\n",
" self._screen_coords = self.component.map_screen([self.data_point])[0]\n",
" sx, sy = self._screen_coords\n",
"\n",
" if isinstance(self.label_position, str):\n",
" orientation = self.label_position\n",
" if (\"left\" in orientation) or (\"right\" in orientation):\n",
" if \" \" not in orientation:\n",
" self.y = sy - self.height / 2\n",
" if \"left\" in orientation:\n",
" self.outer_x = sx - self.outer_width - 1\n",
" elif \"right\" in orientation:\n",
" self.outer_x = sx\n",
" if (\"top\" in orientation) or (\"bottom\" in orientation):\n",
" if \" \" not in orientation:\n",
" self.x = sx - self.width / 2\n",
" if \"bottom\" in orientation:\n",
" self.outer_y = sy - self.outer_height - 1\n",
" elif \"top\" in orientation:\n",
" self.outer_y = sy\n",
" if \"center\" in orientation:\n",
" if \" \" not in orientation:\n",
" self.x = sx - (self.width/2)\n",
" self.y = sy - (self.height/2)\n",
" else:\n",
" self.x = sx - (self.outer_width/2) - 1\n",
" self.y = sy - (self.outer_height/2) - 1\n",
" else:\n",
" self.x = sx + self.label_position[0]\n",
" self.y = sy + self.label_position[1]\n",
"\n",
" self._cached_arrow = None\n",
" return\n",
"\n",
" def _data_point_changed(self, old, new):\n",
" if new is not None:\n",
" self._create_new_labels()\n",
"\n",
" def _label_format_changed(self, old, new):\n",
" self._create_new_labels()\n",
"\n",
" def _label_text_changed(self, old, new):\n",
" self._create_new_labels()\n",
"\n",
" def _show_label_coords_changed(self, old, new):\n",
" self._create_new_labels()\n",
"\n",
" def _create_new_labels(self):\n",
" pt = self.data_point\n",
" if pt is not None:\n",
" if self.show_label_coords:\n",
" self.lines = [self.label_text, self.label_format % {\"x\": pt[0], \"y\": pt[1]}]\n",
" else:\n",
" self.lines = [self.label_text]\n",
"\n",
" def _component_changed(self, old, new):\n",
" for comp, attach in ((old, False), (new, True)):\n",
" if comp is not None:\n",
" if hasattr(comp, 'index_mapper'):\n",
" self._modify_mapper_listeners(comp.index_mapper, attach=attach)\n",
" if hasattr(comp, 'value_mapper'):\n",
" self._modify_mapper_listeners(comp.value_mapper, attach=attach)\n",
" return\n",
"\n",
" def _modify_mapper_listeners(self, mapper, attach=True):\n",
" if mapper is not None:\n",
" mapper.on_trait_change(self._handle_mapper, 'updated', remove=not attach)\n",
" return\n",
"\n",
" def _handle_mapper(self):\n",
" # This gets fired whenever a mapper on our plot fires its 'updated' event.\n",
" self._layout_needed = True\n",
"\n",
" @on_trait_change(\"arrow_size,arrow_root,arrow_min_length,arrow_max_length\")\n",
" def _invalidate_arrow(self):\n",
" self._cached_arrow = None\n",
" self._layout_needed = True\n",
"\n",
" @on_trait_change(\"label_position,position,position_items,bounds,bounds_items\")\n",
" def _invalidate_layout(self):\n",
" self._layout_needed = True\n",
"\n",
"\n",
" def _get_xmid(self):\n",
" return 0.5 * (self.x + self.x2)\n",
" \n",
" def _get_ymid(self):\n",
" return 0.5 * (self.y + self.y2)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01818181818181818,
0,
0.01818181818181818,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0.013157894736842105,
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013157894736842105,
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012658227848101266,
0,
0.012658227848101266,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014084507042253521,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0.011904761904761904,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0.04,
0,
0.2,
0,
0
] | 386 | 0.001966 |
"""Tests for Dalite XBLock."""
from unittest import TestCase
import ddt
import mock
from xblock.core import XBlock
from xblock.field_data import DictFieldData
from xblock.fragment import Fragment
from dalite_xblock import dalite_xblock
from dalite_xblock.dalite_xblock import DaliteXBlock
from dalite_xblock.passport_utils import DaliteLtiPassport
from tests.utils import TestWithPatchesMixin
DEFAULT_LTI_PASSPORTS = [
"dalite-ng-1:dalite-xblock:aHR0cDovL2ZpcnN0LnVybDo4MDgwO0tFWTtTRUNSRVQ=",
"dalite-ng-2:dalite-xblock:aHR0cDovL290aGVyLnVybDtPVEhFUktFWTtPVEhFUlNFQ1JFVA==",
"dalite-ng-3:dalite-xblock:aHR0cHM6Ly8xOTIuMTY4LjMzLjE7YWxwaGE7YmV0YQ==",
"dalite-ng-4:dalite-xblock:aHR0cHM6Ly9leGFtcGxlLmNvbS87YWxwaGE7YmV0YQ=="
]
PARSED_LTI_PASSPORTS = {
# This is http with a port.
"dalite-ng-1": DaliteLtiPassport("dalite-ng-1", "http://first.url:8080", "KEY", "SECRET"),
# This one is http without a port
"dalite-ng-2": DaliteLtiPassport("dalite-ng-2", "http://other.url", "OTHERKEY", "OTHERSECRET"),
# This one is https with IP instead of domain
"dalite-ng-3": DaliteLtiPassport("dalite-ng-3", "https://192.168.33.1", "alpha", "beta"),
# This one has a trailing slash
"dalite-ng-4": DaliteLtiPassport("dalite-ng-3", "https://example.com/", "alpha", "beta")
}
@ddt.ddt
class DaliteXBlockTests(TestCase, TestWithPatchesMixin):
"""Tests for Dalite XBlock."""
DEFAULT_COURSE_ID = "course-1"
def setUp(self):
"""Obviously, setUP method sets up test environment for each individual test to run."""
self.runtime_mock = mock.Mock()
self.runtime_mock.course_id = self.DEFAULT_COURSE_ID
self.block = DaliteXBlock(
self.runtime_mock, DictFieldData({}), scope_ids=mock.Mock()
)
self.mock_course = mock.Mock(spec=XBlock)
self.mock_course.lti_passports = DEFAULT_LTI_PASSPORTS
self.runtime_mock.modulestore.get_course = mock.Mock(return_value=self.mock_course)
def test_course(self):
"""Test course property."""
mock_course = mock.Mock(spec=XBlock)
self.runtime_mock.modulestore.get_course = mock.Mock(return_value=mock_course)
self.assertEqual(self.block.course, mock_course)
self.runtime_mock.modulestore.get_course.assert_called_once_with(self.DEFAULT_COURSE_ID)
def test_dalite_xblock_lti_passports(self):
"""Test dalite_xblock_lti_passports property."""
with mock.patch.object(dalite_xblock, "filter_and_parse_passports") as filter_passwords:
passports = ['some:mock:passport']
self.mock_course.lti_passports = passports
unused_variable_1 = self.block.dalite_xblock_lti_passports
unused_variable_2 = self.block.dalite_xblock_lti_passports
self.assertIs(unused_variable_1, unused_variable_2)
# Calling property twice to check caching behaviour.
filter_passwords.assert_called_once_with(passports)
@ddt.data(
('', None),
('missing', None),
('dalite-ng-1', PARSED_LTI_PASSPORTS['dalite-ng-1']),
('dalite-ng-2', PARSED_LTI_PASSPORTS['dalite-ng-2']),
('dalite-ng-3', PARSED_LTI_PASSPORTS['dalite-ng-3'])
)
@ddt.unpack
def test_lti_passport(self, lti_id, expected_result):
"""Test lti_passport property."""
self.block.lti_id = lti_id
self.assertEqual(self.block.lti_passport, expected_result)
@ddt.data(
('', ''),
('missing', ''),
('dalite-ng-1', "http://first.url:8080/lti/"),
('dalite-ng-2', "http://other.url/lti/"),
('dalite-ng-3', "https://192.168.33.1/lti/"),
('dalite-ng-4', "https://example.com/lti/")
)
@ddt.unpack
def test_launch_url(self, lti_id, launch_url):
"""Test launch_url property."""
self.block.lti_id = lti_id
self.assertEqual(self.block.launch_url, launch_url)
@ddt.data(
('', '', ''),
('missing', '', ''),
('dalite-ng-1', "KEY", "SECRET"),
('dalite-ng-2', "OTHERKEY", "OTHERSECRET")
)
@ddt.unpack
def test_key_secret(self, lti_id, key, secret):
"""Test lti_provider_key_secret property."""
self.block.lti_id = lti_id
self.assertEqual(self.block.lti_provider_key_secret, (key, secret))
@ddt.data(
([], [DaliteXBlock.NO_LTI_PASSPORTS_OPTION]), # no passports at all
(["dalite-ng:QWE:ASD"], [DaliteXBlock.NO_LTI_PASSPORTS_OPTION]), # no dalite-xblock passports
        # two dalite and one non-dalite passport
(
[
"dalite-ng:QWE:ASD",
"dalite-ng-1:dalite-xblock:aHR0cDovL2ZpcnN0LnVybDo4MDgwO0tFWTtTRUNSRVQ=",
"dalite-ng-2:dalite-xblock:aHR0cDovL290aGVyLnVybDtPVEhFUktFWTtPVEhFUlNFQ1JFVA=="
],
[
{"display_name": "dalite-ng-1", "value": "dalite-ng-1"},
{"display_name": "dalite-ng-2", "value": "dalite-ng-2"},
]
)
)
@ddt.unpack
def test_lti_id_values_provider(self, lti_passports, expected_result):
"""Test lti_id_values_provider."""
self.mock_course.lti_passports = lti_passports
self.assertEqual(self.block.lti_id_values_provider(), expected_result)
@ddt.data(
('', 1), ('asgn#1', 1), ('assignment-2', 3), ('almost-irrelevant', 'almost-irrelevenat-too')
)
@ddt.unpack
def test_clean_studio_edits(self, assignment_id, question_id):
"""
        Test clean_studio_edits transforms fields coming from Studio editor.
Two transforms are applied:
* Sets values to "fixed" fields: hide_launch, has_score, ask_to_send_username and ask_to_send_email
* Sets "custom_parameters" from assignment_id and question_id
"""
initial_data = {'assignment_id': assignment_id, 'question_id': question_id}
expected_result = {
'hide_launch': False,
'has_score': True,
'custom_parameters': ["assignment_id=" + str(assignment_id), "question_id=" + str(question_id)],
'ask_to_send_username': False,
'ask_to_send_email': False
}
expected_result.update(initial_data) # all initial values should still be there
data = initial_data.copy()
self.block.clean_studio_edits(data)
try:
self.assertEqual(data, expected_result)
except AssertionError:
print "Intitial: ", initial_data
print "Actual: ", data
print "Expected: ", expected_result
raise
def test_is_ready_positive(self):
"""Test is_ready method returns true when has all the data."""
block = DaliteXBlock(
self.runtime_mock, DictFieldData({
'question_id': '4', 'assignment_id': 'foo', 'lti_id': 'dalite-ng-1'
}),
scope_ids=mock.Mock()
)
self.assertTrue(block.is_lti_ready)
def test_is_ready_negative(self):
"""Test is_ready method returns false without the data."""
block = DaliteXBlock(
self.runtime_mock, DictFieldData({}),
scope_ids=mock.Mock()
)
self.assertFalse(block.is_lti_ready)
def test_add_custom_parameters(self):
"""Test for add_extra_custom_params contextmanager."""
canary = ['param1=value1']
additional_params = ["param2=value2", "param3=value3"]
self.block.custom_parameters = canary
with self.block.add_extra_custom_params(additional_params):
self.assertEqual(len(self.block.custom_parameters), len(additional_params + canary))
self.assertEqual(set(self.block.custom_parameters), set(additional_params + canary))
self.assertIs(self.block.custom_parameters, canary)
self.assertEqual(self.block.custom_parameters, ['param1=value1'])
@ddt.data(
# For requests without suffix there is no extra parameters
('', []),
(DaliteXBlock.ADMIN_URL_SUFFIX, [u'action=launch-admin']),
(DaliteXBlock.EDIT_QUESTION_SUFFIX, [u'action=edit-question'])
)
@ddt.unpack
def test_lti_launch_handler(self, suffix, expected_params):
"""Test for lti_launch_handler method."""
request_canary = object()
        # Workaround for the lack of nonlocal in Python 2
actual_values = {}
def super_handler_mock(self, request, suffix=''):
"""A mock version of lti_launch_handler that will be attached to super call."""
actual_values['actual_params'] = self.custom_parameters
actual_values['actual_request'] = request
actual_values['actual_suffix'] = suffix
with mock.patch("dalite_xblock.dalite_xblock.LtiConsumerXBlock.lti_launch_handler", super_handler_mock):
self.block.lti_launch_handler(request_canary, suffix)
self.assertIs(request_canary, actual_values['actual_request'])
self.assertEqual('', actual_values['actual_suffix'])
self.assertEqual(expected_params, actual_values['actual_params'])
def test_render_admin_button(self):
"""Test for the render_button_launching_admin method."""
rendered_canary = "I'm `HTML` template **pinky swear**"
form_url_suffix = "/test"
button_label = "Press Me"
id_specifier = "launch-admin"
context = {
"form_url": "deadbeef/lti_launch_handler",
}
with mock.patch('dalite_xblock.dalite_xblock.DaliteXBlock._get_context_for_template') as context, \
mock.patch(
"dalite_xblock.dalite_xblock.loader.render_django_template", return_value=rendered_canary) as render:
context.return_value = {}
result = self.block.render_button_launching_admin(context, form_url_suffix, button_label, id_specifier)
self.assertEqual(result, rendered_canary)
render.assert_called_once_with(
'/templates/dalite_xblock_lti_iframe.html',
{
'element_id_specifier': 'launch-admin', 'dalite_admin_label': 'Press Me', 'has_score': False,
'form_url_suffix': '/test'
}
)
# TODO: should be an integration test - figure out how to do this
    # As is, this test is extremely fragile - it'll likely break on every code change
def test_student_view(self):
"""Test that student_view adds JS workaround."""
mock_fragment = mock.Mock(spec=Fragment)
context = {}
load_js_result = "Load JS result"
with mock.patch("dalite_xblock.dalite_xblock.LtiConsumerXBlock.student_view") as patched_super, \
mock.patch("dalite_xblock.dalite_xblock.loader.load_unicode") as patched_load_unicode, \
mock.patch(
'dalite_xblock.dalite_xblock.DaliteXBlock.is_lti_ready', new_callable=mock.PropertyMock) as is_ready:
patched_super.return_value = mock_fragment
patched_load_unicode.return_value = load_js_result
is_ready.return_value = True
result = self.block.student_view(context)
patched_super.assert_called_once_with(context)
patched_load_unicode.assert_called_once_with('public/js/dalite_xblock.js')
self.assertEqual(result, mock_fragment)
mock_fragment.add_javascript.assert_called_once_with(load_js_result)
mock_fragment.initialize_js.assert_called_once_with('DaliteXBlock')
def _do_error_page_test(self, view_to_test, is_in_studio):
with mock.patch("dalite_xblock.dalite_xblock.LtiConsumerXBlock.student_view") as patched_super, \
mock.patch('dalite_xblock.dalite_xblock.DaliteXBlock._get_context_for_template') as context, \
mock.patch(
'dalite_xblock.dalite_xblock.DaliteXBlock.is_lti_ready',
new_callable=mock.PropertyMock) as is_ready, \
mock.patch(
'dalite_xblock.dalite_xblock.DaliteXBlock.get_status_message') as get_status_message:
context.return_value = {}
is_ready.return_value = False
patched_super.return_value = Fragment()
get_status_message.return_value = "Some error"
result = view_to_test({})
self.assertIn(
"Some error",
result.body_html()
)
get_status_message.assert_called_once_with(is_in_studio)
def test_author_view_error(self):
"""Test author view calls get_status_message."""
self._do_error_page_test(self.block.author_view, True)
@ddt.data(
# If lti is ready there is no error message
(None, True, True, True),
(None, False, True, True),
# If lti is not ready display the same message in LMS
(DaliteXBlock.LMS_ERROR_MESSAGE, False, False, False),
(DaliteXBlock.LMS_ERROR_MESSAGE, False, False, True),
        # In Studio (CMS) display a different message depending on conditions
(DaliteXBlock.CMS_NO_PASSPORT_ERROR, True, False, False),
(DaliteXBlock.CMS_NO_QUESTION_ERROR, True, False, True),
)
@ddt.unpack
def test_get_message(self, expected_message, in_studio, is_ready_value, has_launch_url):
"""Test author view calls get_status_message."""
with mock.patch(
'dalite_xblock.dalite_xblock.DaliteXBlock.launch_url',
new_callable=mock.PropertyMock) as launch_url, \
mock.patch(
'dalite_xblock.dalite_xblock.DaliteXBlock.is_lti_ready',
new_callable=mock.PropertyMock) as is_ready:
launch_url.return_value = "http://dalite" if has_launch_url else ''
is_ready.return_value = is_ready_value
actual_message = self.block.get_status_message(in_studio)
self.assertEqual(actual_message, expected_message)
def test_author_view_ok(self):
"""Test author view runs successfully."""
render_result = Fragment()
with mock.patch(
'dalite_xblock.dalite_xblock.DaliteXBlock.render_student_view',
return_value=render_result), \
mock.patch(
'dalite_xblock.dalite_xblock.DaliteXBlock.render_button_launching_admin',
return_value=u'') as render_button, \
mock.patch(
'dalite_xblock.dalite_xblock.DaliteXBlock.is_lti_ready',
new_callable=mock.PropertyMock) as is_ready, \
mock.patch(
'dalite_xblock.dalite_xblock.DaliteXBlock.launch_url',
new_callable=mock.PropertyMock) as launch_url:
is_ready.return_value = True
launch_url.return_value = "http://example.com/lti/"
self.block.author_view({})
self.assertEqual(len(render_button.call_args_list), 2)
def test_student_view_error(self):
"""Test student view calls get_status_message."""
self._do_error_page_test(self.block.student_view, False)
# TODO: should be an integration test - figure out how to do this.
# As is, this test is extremely fragile - it'll likely break on every code change
def test_studio_view(self):
"""Test that studio adds JS workaround."""
mock_fragment = mock.Mock(spec=Fragment)
context = {}
load_js_result = "Load JS result"
with mock.patch("dalite_xblock.dalite_xblock.LtiConsumerXBlock.studio_view") as patched_super, \
mock.patch("dalite_xblock.dalite_xblock.loader.load_unicode") as patched_load_unicode:
patched_super.return_value = mock_fragment
patched_load_unicode.return_value = load_js_result
result = self.block.studio_view(context)
patched_super.assert_called_once_with(context)
patched_load_unicode.assert_called_once_with('public/js/dalite_xblock_edit.js')
self.assertEqual(result, mock_fragment)
mock_fragment.add_javascript.assert_called_once_with(load_js_result)
mock_fragment.initialize_js.assert_called_once_with('DaliteXBlockEdit')
| [
"\"\"\"Tests for Dalite XBLock.\"\"\"\n",
"from unittest import TestCase\n",
"\n",
"import ddt\n",
"import mock\n",
"\n",
"from xblock.core import XBlock\n",
"from xblock.field_data import DictFieldData\n",
"from xblock.fragment import Fragment\n",
"\n",
"from dalite_xblock import dalite_xblock\n",
"from dalite_xblock.dalite_xblock import DaliteXBlock\n",
"from dalite_xblock.passport_utils import DaliteLtiPassport\n",
"from tests.utils import TestWithPatchesMixin\n",
"\n",
"DEFAULT_LTI_PASSPORTS = [\n",
" \"dalite-ng-1:dalite-xblock:aHR0cDovL2ZpcnN0LnVybDo4MDgwO0tFWTtTRUNSRVQ=\",\n",
" \"dalite-ng-2:dalite-xblock:aHR0cDovL290aGVyLnVybDtPVEhFUktFWTtPVEhFUlNFQ1JFVA==\",\n",
" \"dalite-ng-3:dalite-xblock:aHR0cHM6Ly8xOTIuMTY4LjMzLjE7YWxwaGE7YmV0YQ==\",\n",
" \"dalite-ng-4:dalite-xblock:aHR0cHM6Ly9leGFtcGxlLmNvbS87YWxwaGE7YmV0YQ==\"\n",
"]\n",
"\n",
"PARSED_LTI_PASSPORTS = {\n",
" # This is http with a port.\n",
" \"dalite-ng-1\": DaliteLtiPassport(\"dalite-ng-1\", \"http://first.url:8080\", \"KEY\", \"SECRET\"),\n",
" # This one is http without a port\n",
" \"dalite-ng-2\": DaliteLtiPassport(\"dalite-ng-2\", \"http://other.url\", \"OTHERKEY\", \"OTHERSECRET\"),\n",
" # This one is https with IP instead of domain\n",
" \"dalite-ng-3\": DaliteLtiPassport(\"dalite-ng-3\", \"https://192.168.33.1\", \"alpha\", \"beta\"),\n",
" # This one has a trailing slash\n",
" \"dalite-ng-4\": DaliteLtiPassport(\"dalite-ng-3\", \"https://example.com/\", \"alpha\", \"beta\")\n",
"}\n",
"\n",
"\n",
"@ddt.ddt\n",
"class DaliteXBlockTests(TestCase, TestWithPatchesMixin):\n",
" \"\"\"Tests for Dalite XBlock.\"\"\"\n",
"\n",
" DEFAULT_COURSE_ID = \"course-1\"\n",
"\n",
" def setUp(self):\n",
" \"\"\"Obviously, setUP method sets up test environment for each individual test to run.\"\"\"\n",
" self.runtime_mock = mock.Mock()\n",
" self.runtime_mock.course_id = self.DEFAULT_COURSE_ID\n",
" self.block = DaliteXBlock(\n",
" self.runtime_mock, DictFieldData({}), scope_ids=mock.Mock()\n",
" )\n",
"\n",
" self.mock_course = mock.Mock(spec=XBlock)\n",
" self.mock_course.lti_passports = DEFAULT_LTI_PASSPORTS\n",
" self.runtime_mock.modulestore.get_course = mock.Mock(return_value=self.mock_course)\n",
"\n",
" def test_course(self):\n",
" \"\"\"Test course property.\"\"\"\n",
" mock_course = mock.Mock(spec=XBlock)\n",
" self.runtime_mock.modulestore.get_course = mock.Mock(return_value=mock_course)\n",
"\n",
" self.assertEqual(self.block.course, mock_course)\n",
" self.runtime_mock.modulestore.get_course.assert_called_once_with(self.DEFAULT_COURSE_ID)\n",
"\n",
" def test_dalite_xblock_lti_passports(self):\n",
" \"\"\"Test dalite_xblock_lti_passports property.\"\"\"\n",
" with mock.patch.object(dalite_xblock, \"filter_and_parse_passports\") as filter_passwords:\n",
" passports = ['some:mock:passport']\n",
" self.mock_course.lti_passports = passports\n",
" unused_variable_1 = self.block.dalite_xblock_lti_passports\n",
" unused_variable_2 = self.block.dalite_xblock_lti_passports\n",
" self.assertIs(unused_variable_1, unused_variable_2)\n",
" # Calling property twice to check caching behaviour.\n",
" filter_passwords.assert_called_once_with(passports)\n",
"\n",
" @ddt.data(\n",
" ('', None),\n",
" ('missing', None),\n",
" ('dalite-ng-1', PARSED_LTI_PASSPORTS['dalite-ng-1']),\n",
" ('dalite-ng-2', PARSED_LTI_PASSPORTS['dalite-ng-2']),\n",
" ('dalite-ng-3', PARSED_LTI_PASSPORTS['dalite-ng-3'])\n",
" )\n",
" @ddt.unpack\n",
" def test_lti_passport(self, lti_id, expected_result):\n",
" \"\"\"Test lti_passport property.\"\"\"\n",
" self.block.lti_id = lti_id\n",
" self.assertEqual(self.block.lti_passport, expected_result)\n",
"\n",
" @ddt.data(\n",
" ('', ''),\n",
" ('missing', ''),\n",
" ('dalite-ng-1', \"http://first.url:8080/lti/\"),\n",
" ('dalite-ng-2', \"http://other.url/lti/\"),\n",
" ('dalite-ng-3', \"https://192.168.33.1/lti/\"),\n",
" ('dalite-ng-4', \"https://example.com/lti/\")\n",
" )\n",
" @ddt.unpack\n",
" def test_launch_url(self, lti_id, launch_url):\n",
" \"\"\"Test launch_url property.\"\"\"\n",
" self.block.lti_id = lti_id\n",
" self.assertEqual(self.block.launch_url, launch_url)\n",
"\n",
" @ddt.data(\n",
" ('', '', ''),\n",
" ('missing', '', ''),\n",
" ('dalite-ng-1', \"KEY\", \"SECRET\"),\n",
" ('dalite-ng-2', \"OTHERKEY\", \"OTHERSECRET\")\n",
" )\n",
" @ddt.unpack\n",
" def test_key_secret(self, lti_id, key, secret):\n",
" \"\"\"Test lti_provider_key_secret property.\"\"\"\n",
" self.block.lti_id = lti_id\n",
" self.assertEqual(self.block.lti_provider_key_secret, (key, secret))\n",
"\n",
" @ddt.data(\n",
" ([], [DaliteXBlock.NO_LTI_PASSPORTS_OPTION]), # no passports at all\n",
" ([\"dalite-ng:QWE:ASD\"], [DaliteXBlock.NO_LTI_PASSPORTS_OPTION]), # no dalite-xblock passports\n",
" # two dalite and one non-dalite pasport\n",
" (\n",
" [\n",
" \"dalite-ng:QWE:ASD\",\n",
" \"dalite-ng-1:dalite-xblock:aHR0cDovL2ZpcnN0LnVybDo4MDgwO0tFWTtTRUNSRVQ=\",\n",
" \"dalite-ng-2:dalite-xblock:aHR0cDovL290aGVyLnVybDtPVEhFUktFWTtPVEhFUlNFQ1JFVA==\"\n",
" ],\n",
" [\n",
" {\"display_name\": \"dalite-ng-1\", \"value\": \"dalite-ng-1\"},\n",
" {\"display_name\": \"dalite-ng-2\", \"value\": \"dalite-ng-2\"},\n",
" ]\n",
" )\n",
" )\n",
" @ddt.unpack\n",
" def test_lti_id_values_provider(self, lti_passports, expected_result):\n",
" \"\"\"Test lti_id_values_provider.\"\"\"\n",
" self.mock_course.lti_passports = lti_passports\n",
" self.assertEqual(self.block.lti_id_values_provider(), expected_result)\n",
"\n",
" @ddt.data(\n",
" ('', 1), ('asgn#1', 1), ('assignment-2', 3), ('almost-irrelevant', 'almost-irrelevenat-too')\n",
" )\n",
" @ddt.unpack\n",
" def test_clean_studio_edits(self, assignment_id, question_id):\n",
" \"\"\"\n",
" Test clean_studio_edites transforms fields coming from Studio editor.\n",
"\n",
" Two transforms are applied:\n",
" * Sets values to \"fixed\" fields: hide_launch, has_score, ask_to_send_username and ask_to_send_email\n",
" * Sets \"custom_parameters\" from assignment_id and question_id\n",
" \"\"\"\n",
" initial_data = {'assignment_id': assignment_id, 'question_id': question_id}\n",
" expected_result = {\n",
" 'hide_launch': False,\n",
" 'has_score': True,\n",
" 'custom_parameters': [\"assignment_id=\" + str(assignment_id), \"question_id=\" + str(question_id)],\n",
" 'ask_to_send_username': False,\n",
" 'ask_to_send_email': False\n",
" }\n",
" expected_result.update(initial_data) # all initial values should still be there\n",
" data = initial_data.copy()\n",
"\n",
" self.block.clean_studio_edits(data)\n",
" try:\n",
" self.assertEqual(data, expected_result)\n",
" except AssertionError:\n",
" print \"Intitial: \", initial_data\n",
" print \"Actual: \", data\n",
" print \"Expected: \", expected_result\n",
" raise\n",
"\n",
" def test_is_ready_positive(self):\n",
" \"\"\"Test is_ready method returns true when has all the data.\"\"\"\n",
" block = DaliteXBlock(\n",
" self.runtime_mock, DictFieldData({\n",
" 'question_id': '4', 'assignment_id': 'foo', 'lti_id': 'dalite-ng-1'\n",
" }),\n",
" scope_ids=mock.Mock()\n",
" )\n",
" self.assertTrue(block.is_lti_ready)\n",
"\n",
" def test_is_ready_negative(self):\n",
" \"\"\"Test is_ready method returns false without the data.\"\"\"\n",
" block = DaliteXBlock(\n",
" self.runtime_mock, DictFieldData({}),\n",
" scope_ids=mock.Mock()\n",
" )\n",
" self.assertFalse(block.is_lti_ready)\n",
"\n",
" def test_add_custom_parameters(self):\n",
" \"\"\"Test for add_extra_custom_params contextmanager.\"\"\"\n",
" canary = ['param1=value1']\n",
" additional_params = [\"param2=value2\", \"param3=value3\"]\n",
" self.block.custom_parameters = canary\n",
"\n",
" with self.block.add_extra_custom_params(additional_params):\n",
" self.assertEqual(len(self.block.custom_parameters), len(additional_params + canary))\n",
" self.assertEqual(set(self.block.custom_parameters), set(additional_params + canary))\n",
"\n",
" self.assertIs(self.block.custom_parameters, canary)\n",
" self.assertEqual(self.block.custom_parameters, ['param1=value1'])\n",
"\n",
" @ddt.data(\n",
" # For requests without suffix there is no extra parameters\n",
" ('', []),\n",
" (DaliteXBlock.ADMIN_URL_SUFFIX, [u'action=launch-admin']),\n",
" (DaliteXBlock.EDIT_QUESTION_SUFFIX, [u'action=edit-question'])\n",
"\n",
" )\n",
" @ddt.unpack\n",
" def test_lti_launch_handler(self, suffix, expected_params):\n",
" \"\"\"Test for lti_launch_handler method.\"\"\"\n",
" request_canary = object()\n",
"\n",
" # Workaround around lack of nonlocal in python 2\n",
" actual_values = {}\n",
"\n",
" def super_handler_mock(self, request, suffix=''):\n",
" \"\"\"A mock version of lti_launch_handler that will be attached to super call.\"\"\"\n",
" actual_values['actual_params'] = self.custom_parameters\n",
" actual_values['actual_request'] = request\n",
" actual_values['actual_suffix'] = suffix\n",
"\n",
" with mock.patch(\"dalite_xblock.dalite_xblock.LtiConsumerXBlock.lti_launch_handler\", super_handler_mock):\n",
" self.block.lti_launch_handler(request_canary, suffix)\n",
"\n",
" self.assertIs(request_canary, actual_values['actual_request'])\n",
" self.assertEqual('', actual_values['actual_suffix'])\n",
" self.assertEqual(expected_params, actual_values['actual_params'])\n",
"\n",
" def test_render_admin_button(self):\n",
" \"\"\"Test for the render_button_launching_admin method.\"\"\"\n",
" rendered_canary = \"I'm `HTML` template **pinky swear**\"\n",
" form_url_suffix = \"/test\"\n",
" button_label = \"Press Me\"\n",
" id_specifier = \"launch-admin\"\n",
" context = {\n",
" \"form_url\": \"deadbeef/lti_launch_handler\",\n",
" }\n",
"\n",
" with mock.patch('dalite_xblock.dalite_xblock.DaliteXBlock._get_context_for_template') as context, \\\n",
" mock.patch(\n",
" \"dalite_xblock.dalite_xblock.loader.render_django_template\", return_value=rendered_canary) as render:\n",
" context.return_value = {}\n",
" result = self.block.render_button_launching_admin(context, form_url_suffix, button_label, id_specifier)\n",
" self.assertEqual(result, rendered_canary)\n",
"\n",
" render.assert_called_once_with(\n",
" '/templates/dalite_xblock_lti_iframe.html',\n",
" {\n",
" 'element_id_specifier': 'launch-admin', 'dalite_admin_label': 'Press Me', 'has_score': False,\n",
" 'form_url_suffix': '/test'\n",
" }\n",
" )\n",
"\n",
" # TODO: should be an integration test - figure out how to do this\n",
" # AS is, this test is extremely fragile - it'll likely break on every code change\n",
" def test_student_view(self):\n",
" \"\"\"Test that student_view adds JS workaround.\"\"\"\n",
" mock_fragment = mock.Mock(spec=Fragment)\n",
" context = {}\n",
" load_js_result = \"Load JS result\"\n",
" with mock.patch(\"dalite_xblock.dalite_xblock.LtiConsumerXBlock.student_view\") as patched_super, \\\n",
" mock.patch(\"dalite_xblock.dalite_xblock.loader.load_unicode\") as patched_load_unicode, \\\n",
" mock.patch(\n",
" 'dalite_xblock.dalite_xblock.DaliteXBlock.is_lti_ready', new_callable=mock.PropertyMock) as is_ready:\n",
" patched_super.return_value = mock_fragment\n",
" patched_load_unicode.return_value = load_js_result\n",
" is_ready.return_value = True\n",
"\n",
" result = self.block.student_view(context)\n",
" patched_super.assert_called_once_with(context)\n",
" patched_load_unicode.assert_called_once_with('public/js/dalite_xblock.js')\n",
"\n",
" self.assertEqual(result, mock_fragment)\n",
" mock_fragment.add_javascript.assert_called_once_with(load_js_result)\n",
" mock_fragment.initialize_js.assert_called_once_with('DaliteXBlock')\n",
"\n",
" def _do_error_page_test(self, view_to_test, is_in_studio):\n",
" with mock.patch(\"dalite_xblock.dalite_xblock.LtiConsumerXBlock.student_view\") as patched_super, \\\n",
" mock.patch('dalite_xblock.dalite_xblock.DaliteXBlock._get_context_for_template') as context, \\\n",
" mock.patch(\n",
" 'dalite_xblock.dalite_xblock.DaliteXBlock.is_lti_ready',\n",
" new_callable=mock.PropertyMock) as is_ready, \\\n",
" mock.patch(\n",
" 'dalite_xblock.dalite_xblock.DaliteXBlock.get_status_message') as get_status_message:\n",
" context.return_value = {}\n",
" is_ready.return_value = False\n",
" patched_super.return_value = Fragment()\n",
" get_status_message.return_value = \"Some error\"\n",
"\n",
" result = view_to_test({})\n",
" self.assertIn(\n",
" \"Some error\",\n",
" result.body_html()\n",
" )\n",
" get_status_message.assert_called_once_with(is_in_studio)\n",
"\n",
" def test_author_view_error(self):\n",
" \"\"\"Test author view calls get_status_message.\"\"\"\n",
" self._do_error_page_test(self.block.author_view, True)\n",
"\n",
" @ddt.data(\n",
" # If lti is ready there is no error message\n",
" (None, True, True, True),\n",
" (None, False, True, True),\n",
" # If lti is not ready display the same message in LMS\n",
" (DaliteXBlock.LMS_ERROR_MESSAGE, False, False, False),\n",
" (DaliteXBlock.LMS_ERROR_MESSAGE, False, False, True),\n",
" # In LMS display different message depending on conditions\n",
" (DaliteXBlock.CMS_NO_PASSPORT_ERROR, True, False, False),\n",
" (DaliteXBlock.CMS_NO_QUESTION_ERROR, True, False, True),\n",
" )\n",
" @ddt.unpack\n",
" def test_get_message(self, expected_message, in_studio, is_ready_value, has_launch_url):\n",
" \"\"\"Test author view calls get_status_message.\"\"\"\n",
" with mock.patch(\n",
" 'dalite_xblock.dalite_xblock.DaliteXBlock.launch_url',\n",
" new_callable=mock.PropertyMock) as launch_url, \\\n",
" mock.patch(\n",
" 'dalite_xblock.dalite_xblock.DaliteXBlock.is_lti_ready',\n",
" new_callable=mock.PropertyMock) as is_ready:\n",
" launch_url.return_value = \"http://dalite\" if has_launch_url else ''\n",
" is_ready.return_value = is_ready_value\n",
" actual_message = self.block.get_status_message(in_studio)\n",
" self.assertEqual(actual_message, expected_message)\n",
"\n",
" def test_author_view_ok(self):\n",
" \"\"\"Test author view runs successfully.\"\"\"\n",
" render_result = Fragment()\n",
" with mock.patch(\n",
" 'dalite_xblock.dalite_xblock.DaliteXBlock.render_student_view',\n",
" return_value=render_result), \\\n",
" mock.patch(\n",
" 'dalite_xblock.dalite_xblock.DaliteXBlock.render_button_launching_admin',\n",
" return_value=u'') as render_button, \\\n",
" mock.patch(\n",
" 'dalite_xblock.dalite_xblock.DaliteXBlock.is_lti_ready',\n",
" new_callable=mock.PropertyMock) as is_ready, \\\n",
" mock.patch(\n",
" 'dalite_xblock.dalite_xblock.DaliteXBlock.launch_url',\n",
" new_callable=mock.PropertyMock) as launch_url:\n",
"\n",
" is_ready.return_value = True\n",
" launch_url.return_value = \"http://example.com/lti/\"\n",
"\n",
" self.block.author_view({})\n",
" self.assertEqual(len(render_button.call_args_list), 2)\n",
"\n",
" def test_student_view_error(self):\n",
" \"\"\"Test student view calls get_status_message.\"\"\"\n",
" self._do_error_page_test(self.block.student_view, False)\n",
"\n",
" # TODO: should be an integration test - figure out how to do this.\n",
" # As is, this test is extremely fragile - it'll likely break on every code change\n",
" def test_studio_view(self):\n",
" \"\"\"Test that studio adds JS workaround.\"\"\"\n",
" mock_fragment = mock.Mock(spec=Fragment)\n",
" context = {}\n",
" load_js_result = \"Load JS result\"\n",
" with mock.patch(\"dalite_xblock.dalite_xblock.LtiConsumerXBlock.studio_view\") as patched_super, \\\n",
" mock.patch(\"dalite_xblock.dalite_xblock.loader.load_unicode\") as patched_load_unicode:\n",
" patched_super.return_value = mock_fragment\n",
" patched_load_unicode.return_value = load_js_result\n",
"\n",
" result = self.block.studio_view(context)\n",
" patched_super.assert_called_once_with(context)\n",
" patched_load_unicode.assert_called_once_with('public/js/dalite_xblock_edit.js')\n",
"\n",
" self.assertEqual(result, mock_fragment)\n",
" mock_fragment.add_javascript.assert_called_once_with(load_js_result)\n",
" mock_fragment.initialize_js.assert_called_once_with('DaliteXBlockEdit')\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0.01,
0,
0.010638297872340425,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0.010309278350515464,
0,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0.011111111111111112,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0.008928571428571428,
0,
0,
0.011904761904761904,
0,
0,
0,
0.009174311926605505,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0.008849557522123894,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0.00847457627118644,
0,
0.008620689655172414,
0,
0,
0,
0,
0,
0.00909090909090909,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.009433962264150943,
0.009900990099009901,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0.012345679012345678,
0,
0,
0,
0.009433962264150943,
0.009345794392523364,
0,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.009523809523809525,
0.009708737864077669,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0.012345679012345678,
0.011904761904761904
] | 365 | 0.001251 |
# -*- coding: utf-8 -*-
'''
Queries for experiments with SEQ operator
'''
import os
from tool.attributes import get_move_attribute_list, get_place_attribute_list
from tool.experiment import SLI, RAN, ALGORITHM, \
CQL_ALG, QUERY, Q_MOVE, Q_PLACE
from tool.io import get_query_dir, write_to_txt, get_out_file, get_env_file
from tool.query.stream import get_register_stream, REG_Q_OUTPUT_STR, REG_Q_STR
# =============================================================================
# Query using SEQ operator
# =============================================================================
SEQ_QUERY = '''
SELECT SEQUENCE IDENTIFIED BY player_id
[RANGE {ran} SECOND, SLIDE {sli} SECOND] FROM s;
'''
# =============================================================================
# CQL Equivalent Queries
# =============================================================================
# RPOS (_pos attribute with original timestamp)
CQL_RPOS = 'SELECT _ts AS _pos, * FROM s[RANGE 1 SECOND, SLIDE 1 SECOND];'
# SPOS (convert RPOS back to stream format)
CQL_SPOS = 'SELECT RSTREAM FROM rpos;'
# W (Window of tuples from SPOS)
CQL_W = '''
SELECT _pos, {att}
FROM spos[RANGE {ran} SECOND, SLIDE {sli} SECOND];
'''
# W_1 (Sequence positions from 1 to end)
CQL_W1 = 'SELECT _pos, player_id FROM w;'
# W_i (Sequence positions from i to end, w_(i-1) - p_(i-1))
CQL_WI = '''
SELECT * FROM w{prev}
EXCEPT
SELECT * FROM p{prev};
'''
# P_i (Tuples with minimum _pos for each identifier)
CQL_PI = '''
SELECT MIN(_pos) AS _pos, player_id FROM w{pos}
GROUP BY player_id;
'''
CQL_PI_FINAL = '''
SELECT {pos} AS _pos, {att} FROM p{pos}, w
WHERE p{pos}.player_id = w.player_id AND p{pos}._pos = w._pos
'''
def gen_seq_query(configuration, experiment_conf):
'''
Generate SEQ query
'''
query_dir = get_query_dir(configuration, experiment_conf)
filename = query_dir + os.sep + 'seq.cql'
query = SEQ_QUERY.format(ran=experiment_conf[RAN],
sli=experiment_conf[SLI])
write_to_txt(filename, query)
def gen_cql_position_queries(query_dir, experiment_conf):
'''
Generate queries to get each position
'''
# Generate W_1
filename = query_dir + os.sep + 'w1.cql'
write_to_txt(filename, CQL_W1)
# W_i
for range_value in range(2, experiment_conf[RAN] + 1):
query = CQL_WI.format(prev=range_value - 1)
filename = query_dir + os.sep + \
'w' + str(range_value) + '.cql'
write_to_txt(filename, query)
# P_i
for range_value in range(1, experiment_conf[RAN] + 1):
query = CQL_PI.format(pos=range_value)
filename = query_dir + os.sep + \
'p' + str(range_value) + '.cql'
write_to_txt(filename, query)
def gen_cql_w_query(query_dir, experiment_conf):
'''
Consider RANGE and SLIDE and generate W relation
'''
query = experiment_conf[QUERY]
if query == Q_MOVE:
att_list = get_move_attribute_list()
elif query == Q_PLACE:
att_list = get_place_attribute_list()
# Build attribute names list
att_str = ', '.join(att_list)
# W
query = CQL_W.format(att=att_str, ran=experiment_conf[RAN],
sli=experiment_conf[SLI])
filename = query_dir + os.sep + 'w.cql'
write_to_txt(filename, query)
def gen_cql_equiv_query(query_dir, experiment_conf):
'''
Generate final CQL query
'''
# Get attribute list
query = experiment_conf[QUERY]
if query == Q_MOVE:
att_list = get_move_attribute_list(prefix='w.')
elif query == Q_PLACE:
att_list = get_place_attribute_list(prefix='w.')
att_str = ', '.join(att_list)
# List of final position queries
pos_query_list = []
for position in range(1, experiment_conf[RAN] + 1):
pos_query = CQL_PI_FINAL.format(pos=position, att=att_str)
pos_query_list.append(pos_query)
# Equivalent is the union of final positions
query = '\nUNION\n'.join(pos_query_list) + ';'
filename = query_dir + os.sep + 'equiv.cql'
write_to_txt(filename, query)
def gen_cql_rpos_spos_queries(query_dir):
'''
Generate RPOS and SPOS queries
'''
filename = query_dir + os.sep + 'rpos.cql'
write_to_txt(filename, CQL_RPOS)
filename = query_dir + os.sep + 'spos.cql'
write_to_txt(filename, CQL_SPOS)
def gen_cql_queries(configuration, experiment_conf):
'''
Generate all CQL queries
'''
query_dir = get_query_dir(configuration, experiment_conf)
gen_cql_rpos_spos_queries(query_dir)
gen_cql_position_queries(query_dir, experiment_conf)
gen_cql_w_query(query_dir, experiment_conf)
gen_cql_equiv_query(query_dir, experiment_conf)
def gen_all_queries(configuration, experiment_list):
'''
Generate all queries
'''
for exp_conf in experiment_list:
if exp_conf[ALGORITHM] == CQL_ALG:
gen_cql_queries(configuration, exp_conf)
else:
gen_seq_query(configuration, exp_conf)
def gen_seq_env(configuration, experiment_conf, output):
'''
Generate environment for SEQ
'''
text = get_register_stream(experiment_conf)
# Get query filename
query_dir = get_query_dir(configuration, experiment_conf)
filename = query_dir + os.sep + 'seq.cql'
# Register query
if output:
# Get output filename
out_file = get_out_file(configuration, experiment_conf)
text += REG_Q_OUTPUT_STR.format(qname='seq', qfile=filename,
ofile=out_file)
else:
text += REG_Q_STR.format(qname='seq', qfile=filename)
# Get environment filename
filename = get_env_file(configuration, experiment_conf)
write_to_txt(filename, text)
def gen_cql_env(configuration, experiment_conf, output):
'''
Generate environment for CQL
'''
text = get_register_stream(experiment_conf)
query_dir = get_query_dir(configuration, experiment_conf)
# Environment files for equivalent CQL queries
# RPOS
filename = query_dir + os.sep + 'rpos.cql'
text += REG_Q_STR.format(qname='rpos', qfile=filename)
# SPOS
filename = query_dir + os.sep + 'spos.cql'
text += REG_Q_STR.format(qname='spos', qfile=filename)
# W
filename = query_dir + os.sep + 'w.cql'
text += REG_Q_STR.format(qname='w', qfile=filename)
# W1 and P1
filename = query_dir + os.sep + 'w1.cql'
text += REG_Q_STR.format(qname='w1', qfile=filename)
filename = query_dir + os.sep + 'p1.cql'
text += REG_Q_STR.format(qname='p1', qfile=filename)
# W_i and P_i
range_value = experiment_conf[RAN]
for pos in range(2, range_value + 1):
filename = query_dir + os.sep + 'w' + str(pos) + '.cql'
text += REG_Q_STR.format(qname='w' + str(pos), qfile=filename)
filename = query_dir + os.sep + 'p' + str(pos) + '.cql'
text += REG_Q_STR.format(qname='p' + str(pos), qfile=filename)
# Final equivalent query
filename = query_dir + os.sep + 'equiv.cql'
if output:
# Get output filename
out_file = get_out_file(configuration, experiment_conf)
text += REG_Q_OUTPUT_STR.format(qname='equiv', qfile=filename,
ofile=out_file)
else:
text += REG_Q_STR.format(qname='equiv', qfile=filename)
filename = get_env_file(configuration, experiment_conf)
write_to_txt(filename, text)
def gen_all_env(configuration, experiment_list, output=False):
'''
Generate all environments
'''
for exp_conf in experiment_list:
if exp_conf[ALGORITHM] == CQL_ALG:
gen_cql_env(configuration, exp_conf, output)
else:
gen_seq_env(configuration, exp_conf, output)
| [
"# -*- coding: utf-8 -*-\n",
"'''\n",
"Queries for experiments with SEQ operator\n",
"'''\n",
"\n",
"import os\n",
"\n",
"from tool.attributes import get_move_attribute_list, get_place_attribute_list\n",
"from tool.experiment import SLI, RAN, ALGORITHM, \\\n",
" CQL_ALG, QUERY, Q_MOVE, Q_PLACE\n",
"from tool.io import get_query_dir, write_to_txt, get_out_file, get_env_file\n",
"from tool.query.stream import get_register_stream, REG_Q_OUTPUT_STR, REG_Q_STR\n",
"\n",
"\n",
"# =============================================================================\n",
"# Query using SEQ operator\n",
"# =============================================================================\n",
"SEQ_QUERY = '''\n",
"SELECT SEQUENCE IDENTIFIED BY player_id\n",
"[RANGE {ran} SECOND, SLIDE {sli} SECOND] FROM s;\n",
"'''\n",
"\n",
"# =============================================================================\n",
"# CQL Equivalent Queries\n",
"# =============================================================================\n",
"# RPOS (_pos attribute with original timestamp)\n",
"CQL_RPOS = 'SELECT _ts AS _pos, * FROM s[RANGE 1 SECOND, SLIDE 1 SECOND];'\n",
"# SPOS (convert RPOS back to stream format)\n",
"CQL_SPOS = 'SELECT RSTREAM FROM rpos;'\n",
"# W (Window of tuples from SPOS)\n",
"CQL_W = '''\n",
"SELECT _pos, {att}\n",
"FROM spos[RANGE {ran} SECOND, SLIDE {sli} SECOND];\n",
"'''\n",
"# W_1 (Sequence positions from 1 to end)\n",
"CQL_W1 = 'SELECT _pos, player_id FROM w;'\n",
"# W_i (Sequence positions from i to end, w_(i-1) - p_(i-1))\n",
"CQL_WI = '''\n",
"SELECT * FROM w{prev}\n",
"EXCEPT\n",
"SELECT * FROM p{prev};\n",
"'''\n",
"# P_i (Tuples with minimum _pos for each identifier)\n",
"CQL_PI = '''\n",
"SELECT MIN(_pos) AS _pos, player_id FROM w{pos}\n",
"GROUP BY player_id;\n",
"'''\n",
"CQL_PI_FINAL = '''\n",
" SELECT {pos} AS _pos, {att} FROM p{pos}, w\n",
" WHERE p{pos}.player_id = w.player_id AND p{pos}._pos = w._pos\n",
" '''\n",
"\n",
"\n",
"def gen_seq_query(configuration, experiment_conf):\n",
" '''\n",
" Generate SEQ query\n",
" '''\n",
" query_dir = get_query_dir(configuration, experiment_conf)\n",
" filename = query_dir + os.sep + 'seq.cql'\n",
" query = SEQ_QUERY.format(ran=experiment_conf[RAN],\n",
" sli=experiment_conf[SLI])\n",
" write_to_txt(filename, query)\n",
"\n",
"\n",
"def gen_cql_position_queries(query_dir, experiment_conf):\n",
" '''\n",
" Generate queries to get each position\n",
" '''\n",
" # Generate W_1\n",
" filename = query_dir + os.sep + 'w1.cql'\n",
" write_to_txt(filename, CQL_W1)\n",
" # W_i\n",
" for range_value in range(2, experiment_conf[RAN] + 1):\n",
" query = CQL_WI.format(prev=range_value - 1)\n",
" filename = query_dir + os.sep + \\\n",
" 'w' + str(range_value) + '.cql'\n",
" write_to_txt(filename, query)\n",
" # P_i\n",
" for range_value in range(1, experiment_conf[RAN] + 1):\n",
" query = CQL_PI.format(pos=range_value)\n",
" filename = query_dir + os.sep + \\\n",
" 'p' + str(range_value) + '.cql'\n",
" write_to_txt(filename, query)\n",
"\n",
"\n",
"def gen_cql_w_query(query_dir, experiment_conf):\n",
" '''\n",
" Consider RANGE and SLIDE and generate W relation\n",
" '''\n",
" query = experiment_conf[QUERY]\n",
" if query == Q_MOVE:\n",
" att_list = get_move_attribute_list()\n",
" elif query == Q_PLACE:\n",
" att_list = get_place_attribute_list()\n",
" # Build attribute names list\n",
" att_str = ', '.join(att_list)\n",
" # W\n",
" query = CQL_W.format(att=att_str, ran=experiment_conf[RAN],\n",
" sli=experiment_conf[SLI])\n",
" filename = query_dir + os.sep + 'w.cql'\n",
" write_to_txt(filename, query)\n",
"\n",
"\n",
"def gen_cql_equiv_query(query_dir, experiment_conf):\n",
" '''\n",
" Generate final CQL query\n",
" '''\n",
" # Get attribute list\n",
" query = experiment_conf[QUERY]\n",
" if query == Q_MOVE:\n",
" att_list = get_move_attribute_list(prefix='w.')\n",
" elif query == Q_PLACE:\n",
" att_list = get_place_attribute_list(prefix='w.')\n",
" att_str = ', '.join(att_list)\n",
" # List of final position queries\n",
" pos_query_list = []\n",
" for position in range(1, experiment_conf[RAN] + 1):\n",
" pos_query = CQL_PI_FINAL.format(pos=position, att=att_str)\n",
" pos_query_list.append(pos_query)\n",
" # Equivalent is the union of final positions\n",
" query = '\\nUNION\\n'.join(pos_query_list) + ';'\n",
" filename = query_dir + os.sep + 'equiv.cql'\n",
" write_to_txt(filename, query)\n",
"\n",
"\n",
"def gen_cql_rpos_spos_queries(query_dir):\n",
" '''\n",
" Generate RPOS and SPOS queries\n",
" '''\n",
" filename = query_dir + os.sep + 'rpos.cql'\n",
" write_to_txt(filename, CQL_RPOS)\n",
" filename = query_dir + os.sep + 'spos.cql'\n",
" write_to_txt(filename, CQL_SPOS)\n",
"\n",
"\n",
"def gen_cql_queries(configuration, experiment_conf):\n",
" '''\n",
" Generate all CQL queries\n",
" '''\n",
" query_dir = get_query_dir(configuration, experiment_conf)\n",
" gen_cql_rpos_spos_queries(query_dir)\n",
" gen_cql_position_queries(query_dir, experiment_conf)\n",
" gen_cql_w_query(query_dir, experiment_conf)\n",
" gen_cql_equiv_query(query_dir, experiment_conf)\n",
"\n",
"\n",
"def gen_all_queries(configuration, experiment_list):\n",
" '''\n",
" Generate all queries\n",
" '''\n",
" for exp_conf in experiment_list:\n",
" if exp_conf[ALGORITHM] == CQL_ALG:\n",
" gen_cql_queries(configuration, exp_conf)\n",
" else:\n",
" gen_seq_query(configuration, exp_conf)\n",
"\n",
"\n",
"def gen_seq_env(configuration, experiment_conf, output):\n",
" '''\n",
" Generate environment for SEQ\n",
" '''\n",
" text = get_register_stream(experiment_conf)\n",
" # Get query filename\n",
" query_dir = get_query_dir(configuration, experiment_conf)\n",
" filename = query_dir + os.sep + 'seq.cql'\n",
" # Register query\n",
" if output:\n",
" # Get output filename\n",
" out_file = get_out_file(configuration, experiment_conf)\n",
" text += REG_Q_OUTPUT_STR.format(qname='seq', qfile=filename,\n",
" ofile=out_file)\n",
" else:\n",
" text += REG_Q_STR.format(qname='seq', qfile=filename)\n",
" # Get environment filename\n",
" filename = get_env_file(configuration, experiment_conf)\n",
" write_to_txt(filename, text)\n",
"\n",
"\n",
"def gen_cql_env(configuration, experiment_conf, output):\n",
" '''\n",
" Generate environment for CQL\n",
" '''\n",
" text = get_register_stream(experiment_conf)\n",
" query_dir = get_query_dir(configuration, experiment_conf)\n",
" # Environment files for equivalent CQL queries\n",
" # RPOS\n",
" filename = query_dir + os.sep + 'rpos.cql'\n",
" text += REG_Q_STR.format(qname='rpos', qfile=filename)\n",
" # SPOS\n",
" filename = query_dir + os.sep + 'spos.cql'\n",
" text += REG_Q_STR.format(qname='spos', qfile=filename)\n",
" # W\n",
" filename = query_dir + os.sep + 'w.cql'\n",
" text += REG_Q_STR.format(qname='w', qfile=filename)\n",
" # W1 and P1\n",
" filename = query_dir + os.sep + 'w1.cql'\n",
" text += REG_Q_STR.format(qname='w1', qfile=filename)\n",
" filename = query_dir + os.sep + 'p1.cql'\n",
" text += REG_Q_STR.format(qname='p1', qfile=filename)\n",
" # W_i and P_i\n",
" range_value = experiment_conf[RAN]\n",
" for pos in range(2, range_value + 1):\n",
" filename = query_dir + os.sep + 'w' + str(pos) + '.cql'\n",
" text += REG_Q_STR.format(qname='w' + str(pos), qfile=filename)\n",
" filename = query_dir + os.sep + 'p' + str(pos) + '.cql'\n",
" text += REG_Q_STR.format(qname='p' + str(pos), qfile=filename)\n",
" # Final equivalent query\n",
" filename = query_dir + os.sep + 'equiv.cql'\n",
" if output:\n",
" # Get output filename\n",
" out_file = get_out_file(configuration, experiment_conf)\n",
" text += REG_Q_OUTPUT_STR.format(qname='equiv', qfile=filename,\n",
" ofile=out_file)\n",
" else:\n",
" text += REG_Q_STR.format(qname='equiv', qfile=filename)\n",
" filename = get_env_file(configuration, experiment_conf)\n",
" write_to_txt(filename, text)\n",
"\n",
"\n",
"def gen_all_env(configuration, experiment_list, output=False):\n",
" '''\n",
" Generate all environments\n",
" '''\n",
" for exp_conf in experiment_list:\n",
" if exp_conf[ALGORITHM] == CQL_ALG:\n",
" gen_cql_env(configuration, exp_conf, output)\n",
" else:\n",
" gen_seq_env(configuration, exp_conf, output)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 228 | 0 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import logging
from common import chrome_proxy_metrics as metrics
from telemetry.core import exceptions
from telemetry.page import page_test
def WaitForViaHeader(tab, url="http://check.googlezip.net/test.html"):
"""Wait until responses start coming back with the Chrome Proxy via header.
Poll |url| in |tab| until the Chrome Proxy via header is present in a
response.
This function is useful when testing with the Data Saver API, since Chrome
won't actually start sending requests to the Data Reduction Proxy until the
Data Saver API fetch completes. This function can be used to wait for the Data
Saver API fetch to complete.
"""
tab.Navigate('data:text/html;base64,%s' % base64.b64encode(
'<html><body><script>'
'function ProbeViaHeader(url, wanted_via) {'
'var xmlhttp = new XMLHttpRequest();'
'xmlhttp.open("HEAD",url,false);'
'xmlhttp.send();'
'var via=xmlhttp.getResponseHeader("via");'
'return (via && via.indexOf(wanted_via) != -1);'
'}'
'</script>'
'Waiting for Chrome to start using the DRP...'
'</body></html>'))
tab.WaitForJavaScriptExpression(
'ProbeViaHeader("%s", "%s")' % (url, metrics.CHROME_PROXY_VIA_HEADER), 300)
class ChromeProxyValidation(page_test.PageTest):
"""Base class for all chrome proxy correctness measurements."""
# Value of the extra via header. |None| if no extra via header is expected.
extra_via_header = None
def __init__(self, restart_after_each_page=False, metrics=None):
super(ChromeProxyValidation, self).__init__(
needs_browser_restart_after_each_page=restart_after_each_page)
self._metrics = metrics
self._page = None
def CustomizeBrowserOptions(self, options):
# Enable the chrome proxy (data reduction proxy).
options.AppendExtraBrowserArgs('--enable-spdy-proxy-auth')
def WillNavigateToPage(self, page, tab):
WaitForViaHeader(tab)
tab.ClearCache(force=True)
assert self._metrics
self._metrics.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
self._page = page
# Wait for the load event.
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)
assert self._metrics
self._metrics.Stop(page, tab)
if ChromeProxyValidation.extra_via_header:
self._metrics.AddResultsForExtraViaHeader(
tab, results, ChromeProxyValidation.extra_via_header)
self.AddResults(tab, results)
def AddResults(self, tab, results):
raise NotImplementedError
def StopBrowserAfterPage(self, browser, page): # pylint: disable=W0613
if hasattr(page, 'restart_after') and page.restart_after:
return True
return False
| [
"# Copyright 2014 The Chromium Authors. All rights reserved.\n",
"# Use of this source code is governed by a BSD-style license that can be\n",
"# found in the LICENSE file.\n",
"\n",
"import base64\n",
"import logging\n",
"\n",
"from common import chrome_proxy_metrics as metrics\n",
"from telemetry.core import exceptions\n",
"from telemetry.page import page_test\n",
"\n",
"\n",
"def WaitForViaHeader(tab, url=\"http://check.googlezip.net/test.html\"):\n",
" \"\"\"Wait until responses start coming back with the Chrome Proxy via header.\n",
"\n",
" Poll |url| in |tab| until the Chrome Proxy via header is present in a\n",
" response.\n",
"\n",
" This function is useful when testing with the Data Saver API, since Chrome\n",
" won't actually start sending requests to the Data Reduction Proxy until the\n",
" Data Saver API fetch completes. This function can be used to wait for the Data\n",
" Saver API fetch to complete.\n",
" \"\"\"\n",
"\n",
" tab.Navigate('data:text/html;base64,%s' % base64.b64encode(\n",
" '<html><body><script>'\n",
" 'function ProbeViaHeader(url, wanted_via) {'\n",
" 'var xmlhttp = new XMLHttpRequest();'\n",
" 'xmlhttp.open(\"HEAD\",url,false);'\n",
" 'xmlhttp.send();'\n",
" 'var via=xmlhttp.getResponseHeader(\"via\");'\n",
" 'return (via && via.indexOf(wanted_via) != -1);'\n",
" '}'\n",
" '</script>'\n",
" 'Waiting for Chrome to start using the DRP...'\n",
" '</body></html>'))\n",
"\n",
" tab.WaitForJavaScriptExpression(\n",
" 'ProbeViaHeader(\"%s\", \"%s\")' % (url, metrics.CHROME_PROXY_VIA_HEADER), 300)\n",
"\n",
"\n",
"class ChromeProxyValidation(page_test.PageTest):\n",
" \"\"\"Base class for all chrome proxy correctness measurements.\"\"\"\n",
"\n",
" # Value of the extra via header. |None| if no extra via header is expected.\n",
" extra_via_header = None\n",
"\n",
" def __init__(self, restart_after_each_page=False, metrics=None):\n",
" super(ChromeProxyValidation, self).__init__(\n",
" needs_browser_restart_after_each_page=restart_after_each_page)\n",
" self._metrics = metrics\n",
" self._page = None\n",
"\n",
" def CustomizeBrowserOptions(self, options):\n",
" # Enable the chrome proxy (data reduction proxy).\n",
" options.AppendExtraBrowserArgs('--enable-spdy-proxy-auth')\n",
"\n",
" def WillNavigateToPage(self, page, tab):\n",
" WaitForViaHeader(tab)\n",
"\n",
" tab.ClearCache(force=True)\n",
" assert self._metrics\n",
" self._metrics.Start(page, tab)\n",
"\n",
" def ValidateAndMeasurePage(self, page, tab, results):\n",
" self._page = page\n",
" # Wait for the load event.\n",
" tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)\n",
" assert self._metrics\n",
" self._metrics.Stop(page, tab)\n",
" if ChromeProxyValidation.extra_via_header:\n",
" self._metrics.AddResultsForExtraViaHeader(\n",
" tab, results, ChromeProxyValidation.extra_via_header)\n",
" self.AddResults(tab, results)\n",
"\n",
" def AddResults(self, tab, results):\n",
" raise NotImplementedError\n",
"\n",
" def StopBrowserAfterPage(self, browser, page): # pylint: disable=W0613\n",
" if hasattr(page, 'restart_after') and page.restart_after:\n",
" return True\n",
" return False\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0.016129032258064516,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0.015151515151515152,
0,
0.01282051282051282,
0.038461538461538464,
0,
0.014925373134328358,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0.02631578947368421,
0,
0,
0.013513513513513514,
0,
0.05555555555555555,
0
] | 82 | 0.0043 |
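The ChromeProxyValidation base class above leaves AddResults abstract. Below is a minimal sketch of a concrete subclass; the ChromeProxyMetric constructor and the AddResultsForHeaderValidation method are assumed names used only for illustration, not taken from the snippet above.

# Illustrative sketch only: a concrete validation built on ChromeProxyValidation.
# ChromeProxyMetric and AddResultsForHeaderValidation are assumed names.
class ChromeProxyHeaderValidation(ChromeProxyValidation):
    """Checks responses for the expected Chrome Proxy via header."""

    def __init__(self):
        super(ChromeProxyHeaderValidation, self).__init__(
            restart_after_each_page=True,
            metrics=metrics.ChromeProxyMetric())  # assumed metric class

    def AddResults(self, tab, results):
        # Delegate to the metrics object that was started in WillNavigateToPage
        # and stopped in ValidateAndMeasurePage.
        self._metrics.AddResultsForHeaderValidation(tab, results)  # assumed method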
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import json
from frappe.utils import flt, cstr, nowdate, nowtime
class InvalidWarehouseCompany(frappe.ValidationError): pass
def get_stock_value_on(warehouse=None, posting_date=None, item_code=None):
if not posting_date: posting_date = nowdate()
values, condition = [posting_date], ""
if warehouse:
values.append(warehouse)
condition += " AND warehouse = %s"
if item_code:
values.append(item_code)
condition += " AND item_code = %s"
stock_ledger_entries = frappe.db.sql("""
SELECT item_code, stock_value
FROM `tabStock Ledger Entry`
WHERE posting_date <= %s {0}
ORDER BY timestamp(posting_date, posting_time) DESC, name DESC
""".format(condition), values, as_dict=1)
sle_map = {}
for sle in stock_ledger_entries:
sle_map.setdefault(sle.item_code, flt(sle.stock_value))
return sum(sle_map.values())
def get_stock_balance(item_code, warehouse, posting_date=None, posting_time=None, with_valuation_rate=False):
"""Returns stock balance quantity at given warehouse on given posting date or current date.
If `with_valuation_rate` is True, will return tuple (qty, rate)"""
from erpnext.stock.stock_ledger import get_previous_sle
if not posting_date: posting_date = nowdate()
if not posting_time: posting_time = nowtime()
last_entry = get_previous_sle({
"item_code": item_code,
"warehouse":warehouse,
"posting_date": posting_date,
"posting_time": posting_time })
if with_valuation_rate:
return (last_entry.qty_after_transaction, last_entry.valuation_rate) if last_entry else (0.0, 0.0)
else:
return last_entry.qty_after_transaction or 0.0
def get_latest_stock_balance():
bin_map = {}
for d in frappe.db.sql("""SELECT item_code, warehouse, stock_value as stock_value
FROM tabBin""", as_dict=1):
bin_map.setdefault(d.warehouse, {}).setdefault(d.item_code, flt(d.stock_value))
return bin_map
def get_bin(item_code, warehouse):
bin = frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse})
if not bin:
bin_obj = frappe.get_doc({
"doctype": "Bin",
"item_code": item_code,
"warehouse": warehouse,
})
bin_obj.flags.ignore_permissions = 1
bin_obj.insert()
else:
bin_obj = frappe.get_doc('Bin', bin)
bin_obj.flags.ignore_permissions = True
return bin_obj
def update_bin(args, allow_negative_stock=False, via_landed_cost_voucher=False):
is_stock_item = frappe.db.get_value('Item', args.get("item_code"), 'is_stock_item')
if is_stock_item:
bin = get_bin(args.get("item_code"), args.get("warehouse"))
bin.update_stock(args, allow_negative_stock, via_landed_cost_voucher)
return bin
else:
frappe.msgprint(_("Item {0} ignored since it is not a stock item").format(args.get("item_code")))
def get_incoming_rate(args):
"""Get Incoming Rate based on valuation method"""
from erpnext.stock.stock_ledger import get_previous_sle
in_rate = 0
if (args.get("serial_no") or "").strip():
in_rate = get_avg_purchase_rate(args.get("serial_no"))
else:
valuation_method = get_valuation_method(args.get("item_code"))
previous_sle = get_previous_sle(args)
if valuation_method == 'FIFO':
if not previous_sle:
return 0.0
previous_stock_queue = json.loads(previous_sle.get('stock_queue', '[]') or '[]')
in_rate = get_fifo_rate(previous_stock_queue, args.get("qty") or 0) if previous_stock_queue else 0
elif valuation_method == 'Moving Average':
in_rate = previous_sle.get('valuation_rate') or 0
return in_rate
def get_avg_purchase_rate(serial_nos):
"""get average value of serial numbers"""
serial_nos = get_valid_serial_nos(serial_nos)
return flt(frappe.db.sql("""select avg(ifnull(purchase_rate, 0)) from `tabSerial No`
where name in (%s)""" % ", ".join(["%s"] * len(serial_nos)),
tuple(serial_nos))[0][0])
def get_valuation_method(item_code):
"""get valuation method from item or default"""
val_method = frappe.db.get_value('Item', item_code, 'valuation_method')
if not val_method:
val_method = frappe.db.get_value("Stock Settings", None, "valuation_method") or "FIFO"
return val_method
def get_fifo_rate(previous_stock_queue, qty):
"""get FIFO (average) Rate from Queue"""
if qty >= 0:
total = sum(f[0] for f in previous_stock_queue)
return sum(flt(f[0]) * flt(f[1]) for f in previous_stock_queue) / flt(total) if total else 0.0
else:
available_qty_for_outgoing, outgoing_cost = 0, 0
qty_to_pop = abs(qty)
while qty_to_pop and previous_stock_queue:
batch = previous_stock_queue[0]
if 0 < batch[0] <= qty_to_pop:
# if batch qty > 0
# not enough or exactly same qty in current batch, clear batch
available_qty_for_outgoing += flt(batch[0])
outgoing_cost += flt(batch[0]) * flt(batch[1])
qty_to_pop -= batch[0]
previous_stock_queue.pop(0)
else:
# all from current batch
available_qty_for_outgoing += flt(qty_to_pop)
outgoing_cost += flt(qty_to_pop) * flt(batch[1])
batch[0] -= qty_to_pop
qty_to_pop = 0
return outgoing_cost / available_qty_for_outgoing
def get_valid_serial_nos(sr_nos, qty=0, item_code=''):
"""split serial nos, validate and return list of valid serial nos"""
# TODO: remove duplicates in client side
serial_nos = cstr(sr_nos).strip().replace(',', '\n').split('\n')
valid_serial_nos = []
for val in serial_nos:
if val:
val = val.strip()
if val in valid_serial_nos:
frappe.throw(_("Serial number {0} entered more than once").format(val))
else:
valid_serial_nos.append(val)
if qty and len(valid_serial_nos) != abs(qty):
frappe.throw(_("{0} valid serial nos for Item {1}").format(abs(qty), item_code))
return valid_serial_nos
def validate_warehouse_company(warehouse, company):
warehouse_company = frappe.db.get_value("Warehouse", warehouse, "company")
if warehouse_company and warehouse_company != company:
frappe.throw(_("Warehouse {0} does not belong to company {1}").format(warehouse, company),
InvalidWarehouseCompany)
| [
"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n",
"# License: GNU General Public License v3. See license.txt\n",
"\n",
"from __future__ import unicode_literals\n",
"import frappe\n",
"from frappe import _\n",
"import json\n",
"from frappe.utils import flt, cstr, nowdate, nowtime\n",
"\n",
"class InvalidWarehouseCompany(frappe.ValidationError): pass\n",
"\n",
"def get_stock_value_on(warehouse=None, posting_date=None, item_code=None):\n",
"\tif not posting_date: posting_date = nowdate()\n",
"\n",
"\tvalues, condition = [posting_date], \"\"\n",
"\n",
"\tif warehouse:\n",
"\t\tvalues.append(warehouse)\n",
"\t\tcondition += \" AND warehouse = %s\"\n",
"\n",
"\tif item_code:\n",
"\t\tvalues.append(item_code)\n",
"\t\tcondition.append(\" AND item_code = %s\")\n",
"\n",
"\tstock_ledger_entries = frappe.db.sql(\"\"\"\n",
"\t\tSELECT item_code, stock_value\n",
"\t\tFROM `tabStock Ledger Entry`\n",
"\t\tWHERE posting_date <= %s {0}\n",
"\t\tORDER BY timestamp(posting_date, posting_time) DESC, name DESC\n",
"\t\"\"\".format(condition), values, as_dict=1)\n",
"\n",
"\tsle_map = {}\n",
"\tfor sle in stock_ledger_entries:\n",
"\t\tsle_map.setdefault(sle.item_code, flt(sle.stock_value))\n",
"\n",
"\treturn sum(sle_map.values())\n",
"\n",
"def get_stock_balance(item_code, warehouse, posting_date=None, posting_time=None, with_valuation_rate=False):\n",
"\t\"\"\"Returns stock balance quantity at given warehouse on given posting date or current date.\n",
"\n",
"\tIf `with_valuation_rate` is True, will return tuple (qty, rate)\"\"\"\n",
"\n",
"\tfrom erpnext.stock.stock_ledger import get_previous_sle\n",
"\n",
"\tif not posting_date: posting_date = nowdate()\n",
"\tif not posting_time: posting_time = nowtime()\n",
"\n",
"\tlast_entry = get_previous_sle({\n",
"\t\t\"item_code\": item_code,\n",
"\t\t\"warehouse\":warehouse,\n",
"\t\t\"posting_date\": posting_date,\n",
"\t\t\"posting_time\": posting_time })\n",
"\n",
"\tif with_valuation_rate:\n",
"\t\treturn (last_entry.qty_after_transaction, last_entry.valuation_rate) if last_entry else (0.0, 0.0)\n",
"\telse:\n",
"\t\treturn last_entry.qty_after_transaction or 0.0\n",
"\n",
"def get_latest_stock_balance():\n",
"\tbin_map = {}\n",
"\tfor d in frappe.db.sql(\"\"\"SELECT item_code, warehouse, stock_value as stock_value\n",
"\t\tFROM tabBin\"\"\", as_dict=1):\n",
"\t\t\tbin_map.setdefault(d.warehouse, {}).setdefault(d.item_code, flt(d.stock_value))\n",
"\n",
"\treturn bin_map\n",
"\n",
"def get_bin(item_code, warehouse):\n",
"\tbin = frappe.db.get_value(\"Bin\", {\"item_code\": item_code, \"warehouse\": warehouse})\n",
"\tif not bin:\n",
"\t\tbin_obj = frappe.get_doc({\n",
"\t\t\t\"doctype\": \"Bin\",\n",
"\t\t\t\"item_code\": item_code,\n",
"\t\t\t\"warehouse\": warehouse,\n",
"\t\t})\n",
"\t\tbin_obj.flags.ignore_permissions = 1\n",
"\t\tbin_obj.insert()\n",
"\telse:\n",
"\t\tbin_obj = frappe.get_doc('Bin', bin)\n",
"\tbin_obj.flags.ignore_permissions = True\n",
"\treturn bin_obj\n",
"\n",
"def update_bin(args, allow_negative_stock=False, via_landed_cost_voucher=False):\n",
"\tis_stock_item = frappe.db.get_value('Item', args.get(\"item_code\"), 'is_stock_item')\n",
"\tif is_stock_item:\n",
"\t\tbin = get_bin(args.get(\"item_code\"), args.get(\"warehouse\"))\n",
"\t\tbin.update_stock(args, allow_negative_stock, via_landed_cost_voucher)\n",
"\t\treturn bin\n",
"\telse:\n",
"\t\tfrappe.msgprint(_(\"Item {0} ignored since it is not a stock item\").format(args.get(\"item_code\")))\n",
"\n",
"def get_incoming_rate(args):\n",
"\t\"\"\"Get Incoming Rate based on valuation method\"\"\"\n",
"\tfrom erpnext.stock.stock_ledger import get_previous_sle\n",
"\n",
"\tin_rate = 0\n",
"\tif (args.get(\"serial_no\") or \"\").strip():\n",
"\t\tin_rate = get_avg_purchase_rate(args.get(\"serial_no\"))\n",
"\telse:\n",
"\t\tvaluation_method = get_valuation_method(args.get(\"item_code\"))\n",
"\t\tprevious_sle = get_previous_sle(args)\n",
"\t\tif valuation_method == 'FIFO':\n",
"\t\t\tif not previous_sle:\n",
"\t\t\t\treturn 0.0\n",
"\t\t\tprevious_stock_queue = json.loads(previous_sle.get('stock_queue', '[]') or '[]')\n",
"\t\t\tin_rate = get_fifo_rate(previous_stock_queue, args.get(\"qty\") or 0) if previous_stock_queue else 0\n",
"\t\telif valuation_method == 'Moving Average':\n",
"\t\t\tin_rate = previous_sle.get('valuation_rate') or 0\n",
"\n",
"\treturn in_rate\n",
"\n",
"def get_avg_purchase_rate(serial_nos):\n",
"\t\"\"\"get average value of serial numbers\"\"\"\n",
"\n",
"\tserial_nos = get_valid_serial_nos(serial_nos)\n",
"\treturn flt(frappe.db.sql(\"\"\"select avg(ifnull(purchase_rate, 0)) from `tabSerial No`\n",
"\t\twhere name in (%s)\"\"\" % \", \".join([\"%s\"] * len(serial_nos)),\n",
"\t\ttuple(serial_nos))[0][0])\n",
"\n",
"def get_valuation_method(item_code):\n",
"\t\"\"\"get valuation method from item or default\"\"\"\n",
"\tval_method = frappe.db.get_value('Item', item_code, 'valuation_method')\n",
"\tif not val_method:\n",
"\t\tval_method = frappe.db.get_value(\"Stock Settings\", None, \"valuation_method\") or \"FIFO\"\n",
"\treturn val_method\n",
"\n",
"def get_fifo_rate(previous_stock_queue, qty):\n",
"\t\"\"\"get FIFO (average) Rate from Queue\"\"\"\n",
"\tif qty >= 0:\n",
"\t\ttotal = sum(f[0] for f in previous_stock_queue)\n",
"\t\treturn sum(flt(f[0]) * flt(f[1]) for f in previous_stock_queue) / flt(total) if total else 0.0\n",
"\telse:\n",
"\t\tavailable_qty_for_outgoing, outgoing_cost = 0, 0\n",
"\t\tqty_to_pop = abs(qty)\n",
"\t\twhile qty_to_pop and previous_stock_queue:\n",
"\t\t\tbatch = previous_stock_queue[0]\n",
"\t\t\tif 0 < batch[0] <= qty_to_pop:\n",
"\t\t\t\t# if batch qty > 0\n",
"\t\t\t\t# not enough or exactly same qty in current batch, clear batch\n",
"\t\t\t\tavailable_qty_for_outgoing += flt(batch[0])\n",
"\t\t\t\toutgoing_cost += flt(batch[0]) * flt(batch[1])\n",
"\t\t\t\tqty_to_pop -= batch[0]\n",
"\t\t\t\tprevious_stock_queue.pop(0)\n",
"\t\t\telse:\n",
"\t\t\t\t# all from current batch\n",
"\t\t\t\tavailable_qty_for_outgoing += flt(qty_to_pop)\n",
"\t\t\t\toutgoing_cost += flt(qty_to_pop) * flt(batch[1])\n",
"\t\t\t\tbatch[0] -= qty_to_pop\n",
"\t\t\t\tqty_to_pop = 0\n",
"\n",
"\t\treturn outgoing_cost / available_qty_for_outgoing\n",
"\n",
"def get_valid_serial_nos(sr_nos, qty=0, item_code=''):\n",
"\t\"\"\"split serial nos, validate and return list of valid serial nos\"\"\"\n",
"\t# TODO: remove duplicates in client side\n",
"\tserial_nos = cstr(sr_nos).strip().replace(',', '\\n').split('\\n')\n",
"\n",
"\tvalid_serial_nos = []\n",
"\tfor val in serial_nos:\n",
"\t\tif val:\n",
"\t\t\tval = val.strip()\n",
"\t\t\tif val in valid_serial_nos:\n",
"\t\t\t\tfrappe.throw(_(\"Serial number {0} entered more than once\").format(val))\n",
"\t\t\telse:\n",
"\t\t\t\tvalid_serial_nos.append(val)\n",
"\n",
"\tif qty and len(valid_serial_nos) != abs(qty):\n",
"\t\tfrappe.throw(_(\"{0} valid serial nos for Item {1}\").format(abs(qty), item_code))\n",
"\n",
"\treturn valid_serial_nos\n",
"\n",
"def validate_warehouse_company(warehouse, company):\n",
"\twarehouse_company = frappe.db.get_value(\"Warehouse\", warehouse, \"company\")\n",
"\tif warehouse_company and warehouse_company != company:\n",
"\t\tfrappe.throw(_(\"Warehouse {0} does not belong to company {1}\").format(warehouse, company),\n",
"\t\t\tInvalidWarehouseCompany)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0.013333333333333334,
0.0425531914893617,
0,
0.025,
0,
0.06666666666666667,
0.037037037037037035,
0.02702702702702703,
0,
0.06666666666666667,
0.037037037037037035,
0.023809523809523808,
0,
0.023809523809523808,
0.03125,
0.03225806451612903,
0.03225806451612903,
0.015384615384615385,
0.023255813953488372,
0,
0.07142857142857142,
0.029411764705882353,
0.017241379310344827,
0,
0.03333333333333333,
0,
0.01818181818181818,
0.021505376344086023,
0,
0.014705882352941176,
0,
0.017543859649122806,
0,
0.0425531914893617,
0.0425531914893617,
0,
0.030303030303030304,
0.038461538461538464,
0.08,
0.03125,
0.058823529411764705,
0,
0.04,
0.019801980198019802,
0.14285714285714285,
0.02040816326530612,
0,
0.03125,
0.07142857142857142,
0.024096385542168676,
0.03333333333333333,
0.03614457831325301,
0,
0.0625,
0,
0.02857142857142857,
0.023809523809523808,
0.07692307692307693,
0.034482758620689655,
0.047619047619047616,
0.037037037037037035,
0.037037037037037035,
0.2,
0.02564102564102564,
0.05263157894736842,
0.14285714285714285,
0.02564102564102564,
0.024390243902439025,
0.0625,
0,
0.024691358024691357,
0.023529411764705882,
0.05263157894736842,
0.016129032258064516,
0.013888888888888888,
0.07692307692307693,
0.14285714285714285,
0.02,
0,
0.034482758620689655,
0.0196078431372549,
0.017543859649122806,
0,
0.07692307692307693,
0.023255813953488372,
0.017543859649122806,
0.14285714285714285,
0.015384615384615385,
0.025,
0.030303030303030304,
0.041666666666666664,
0.06666666666666667,
0.023809523809523808,
0.0196078431372549,
0.022222222222222223,
0.018867924528301886,
0,
0.0625,
0,
0.02564102564102564,
0.023255813953488372,
0,
0.02127659574468085,
0.023255813953488372,
0.015873015873015872,
0.07142857142857142,
0,
0.02702702702702703,
0.02040816326530612,
0.0136986301369863,
0.05,
0.02247191011235955,
0.05263157894736842,
0,
0.021739130434782608,
0.023809523809523808,
0.07142857142857142,
0.02,
0.020618556701030927,
0.14285714285714285,
0.0196078431372549,
0.041666666666666664,
0.022222222222222223,
0.02857142857142857,
0.029411764705882353,
0.043478260869565216,
0.014925373134328358,
0.020833333333333332,
0.0196078431372549,
0.037037037037037035,
0.03125,
0.1111111111111111,
0.034482758620689655,
0.02,
0.018867924528301886,
0.037037037037037035,
0.05263157894736842,
0,
0.019230769230769232,
0,
0.01818181818181818,
0.014285714285714285,
0.023809523809523808,
0.015151515151515152,
0,
0.043478260869565216,
0.041666666666666664,
0.1,
0.047619047619047616,
0.03225806451612903,
0.013157894736842105,
0.1111111111111111,
0.030303030303030304,
0,
0.02127659574468085,
0.024096385542168676,
0,
0.04,
0,
0.019230769230769232,
0.013157894736842105,
0.017857142857142856,
0.021505376344086023,
0.07142857142857142
] | 175 | 0.030839 |
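A standalone sketch of how get_fifo_rate behaves, assuming the module's own imports (flt from frappe.utils) are available; the queue entries are made-up numbers.

# Usage sketch for get_fifo_rate; each queue entry is [qty, rate], oldest first.
queue = [[10, 100.0], [5, 120.0]]

# Incoming (qty >= 0): weighted-average rate of the whole queue,
# (10*100 + 5*120) / 15 = 106.67
incoming_rate = get_fifo_rate([list(b) for b in queue], 0)

# Outgoing (qty < 0): average cost of consuming 12 units from the front,
# i.e. 10 @ 100 plus 2 @ 120, giving 1240 / 12 = 103.33. The queue is
# mutated in place for outgoing quantities, hence the copies above.
outgoing_rate = get_fifo_rate([list(b) for b in queue], -12)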
__author__ = 'mdavid'
import string
import re
class InputValidation:
@staticmethod
def is_valid_domain(text):
if not text:
return False
if '.' not in text:
return False
allowed = set(string.ascii_letters + string.digits + '-.')
if set(text) - allowed:
return False
return True
@staticmethod
def is_valid_wallet_name(text):
if not text:
return False
allowed = set(string.ascii_letters + string.digits + '-')
if set(text) - allowed:
return False
return True
@staticmethod
def is_valid_field(text):
if isinstance(text, float) or isinstance(text, int):
text = str(text)
if not text:
return False
allowed = set(string.ascii_letters + string.digits + ' -.#,()+!@_')
if set(text) - allowed:
return False
return True
@staticmethod
def is_valid_wallet_address(wallet_address):
if not wallet_address:
return False
allowed = set(string.ascii_letters + string.digits)
if set(wallet_address) - allowed:
return False
return True
| [
"__author__ = 'mdavid'\n",
"\n",
"import string\n",
"import re\n",
"\n",
"\n",
"class InputValidation:\n",
" @staticmethod\n",
" def is_valid_domain(text):\n",
" if not text:\n",
" return False\n",
"\n",
" if '.' not in text:\n",
" return False\n",
"\n",
" allowed = set(string.ascii_letters + string.digits + '-.')\n",
" if set(text) - allowed:\n",
" return False\n",
" return True\n",
"\n",
" @staticmethod\n",
" def is_valid_wallet_name(text):\n",
" if not text:\n",
" return False\n",
"\n",
" allowed = set(string.ascii_letters + string.digits + '-')\n",
" if set(text) - allowed:\n",
" return False\n",
" return True\n",
"\n",
" @staticmethod\n",
" def is_valid_field(text):\n",
"\n",
" if isinstance(text, float) or isinstance(text, int):\n",
" text = str(text)\n",
"\n",
" if not text:\n",
" return False\n",
"\n",
" allowed = set(string.ascii_letters + string.digits + ' -.#,()+!@_')\n",
" if set(text) - allowed:\n",
" return False\n",
" return True\n",
"\n",
" @staticmethod\n",
" def is_valid_wallet_address(wallet_address):\n",
" if not wallet_address:\n",
" return False\n",
"\n",
" allowed = set(string.ascii_letters + string.digits)\n",
" if set(wallet_address) - allowed:\n",
" return False\n",
" return True\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1
] | 54 | 0.018519 |
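A few quick checks illustrating the whitelisting behaviour of the validators above; the sample strings are arbitrary.

# Usage sketch: each validator whitelists a character set and rejects everything else.
assert InputValidation.is_valid_domain("example.org")
assert not InputValidation.is_valid_domain("exa mple.org")      # space not allowed
assert InputValidation.is_valid_wallet_name("alice-01")
assert not InputValidation.is_valid_wallet_name("alice.01")     # dot not allowed
assert InputValidation.is_valid_field(42)                       # ints are stringified first
assert not InputValidation.is_valid_wallet_address("1Br-xyz")   # '-' not allowed here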
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest
context = ge.get_context()
datasource_config = {
"name": "taxi_datasource",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"default_runtime_data_connector_name": {
"class_name": "RuntimeDataConnector",
"module_name": "great_expectations.datasource.data_connector",
"batch_identifiers": ["default_identifier_name"],
},
"default_inferred_data_connector_name": {
"class_name": "InferredAssetFilesystemDataConnector",
"base_directory": "<PATH_TO_YOUR_DATA_HERE>",
"default_regex": {"group_names": ["data_asset_name"], "pattern": "(.*)"},
},
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_config["data_connectors"]["default_inferred_data_connector_name"][
"base_directory"
] = "../data/"
context.test_yaml_config(yaml.dump(datasource_config))
context.add_datasource(**datasource_config)
# Here is a RuntimeBatchRequest using a path to a single CSV file
batch_request = RuntimeBatchRequest(
datasource_name="taxi_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="<YOUR_MEANINGFUL_NAME>", # This can be anything that identifies this data_asset for you
runtime_parameters={"path": "<PATH_TO_YOUR_DATA_HERE>"}, # Add your path here.
batch_identifiers={"default_identifier_name": "something_something"},
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the BatchRequest above.
batch_request.runtime_parameters["path"] = "./data/yellow_trip_data_sample_2019-01.csv"
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
# Here is a BatchRequest naming a data_asset
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name directly in the BatchRequest above.
batch_request.data_asset_name = "yellow_trip_data_sample_2019-01.csv"
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["taxi_datasource"]
assert "yellow_trip_data_sample_2018-01.csv" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_inferred_data_connector_name"
]
)
| [
"from ruamel import yaml\n",
"\n",
"import great_expectations as ge\n",
"from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest\n",
"\n",
"context = ge.get_context()\n",
"\n",
"datasource_config = {\n",
" \"name\": \"taxi_datasource\",\n",
" \"class_name\": \"Datasource\",\n",
" \"module_name\": \"great_expectations.datasource\",\n",
" \"execution_engine\": {\n",
" \"module_name\": \"great_expectations.execution_engine\",\n",
" \"class_name\": \"PandasExecutionEngine\",\n",
" },\n",
" \"data_connectors\": {\n",
" \"default_runtime_data_connector_name\": {\n",
" \"class_name\": \"RuntimeDataConnector\",\n",
" \"module_name\": \"great_expectations.datasource.data_connector\",\n",
" \"batch_identifiers\": [\"default_identifier_name\"],\n",
" },\n",
" \"default_inferred_data_connector_name\": {\n",
" \"class_name\": \"InferredAssetFilesystemDataConnector\",\n",
" \"base_directory\": \"<PATH_TO_YOUR_DATA_HERE>\",\n",
" \"default_regex\": {\"group_names\": [\"data_asset_name\"], \"pattern\": \"(.*)\"},\n",
" },\n",
" },\n",
"}\n",
"\n",
"# Please note this override is only to provide good UX for docs and tests.\n",
"# In normal usage you'd set your path directly in the yaml above.\n",
"datasource_config[\"data_connectors\"][\"default_inferred_data_connector_name\"][\n",
" \"base_directory\"\n",
"] = \"../data/\"\n",
"\n",
"context.test_yaml_config(yaml.dump(datasource_config))\n",
"\n",
"context.add_datasource(**datasource_config)\n",
"\n",
"# Here is a RuntimeBatchRequest using a path to a single CSV file\n",
"batch_request = RuntimeBatchRequest(\n",
" datasource_name=\"taxi_datasource\",\n",
" data_connector_name=\"default_runtime_data_connector_name\",\n",
" data_asset_name=\"<YOUR_MEANINGFUL_NAME>\", # This can be anything that identifies this data_asset for you\n",
" runtime_parameters={\"path\": \"<PATH_TO_YOUR_DATA_HERE>\"}, # Add your path here.\n",
" batch_identifiers={\"default_identifier_name\": \"something_something\"},\n",
")\n",
"\n",
"# Please note this override is only to provide good UX for docs and tests.\n",
"# In normal usage you'd set your path directly in the BatchRequest above.\n",
"batch_request.runtime_parameters[\"path\"] = \"./data/yellow_trip_data_sample_2019-01.csv\"\n",
"\n",
"context.create_expectation_suite(\n",
" expectation_suite_name=\"test_suite\", overwrite_existing=True\n",
")\n",
"validator = context.get_validator(\n",
" batch_request=batch_request, expectation_suite_name=\"test_suite\"\n",
")\n",
"print(validator.head())\n",
"\n",
"# NOTE: The following code is only for testing and can be ignored by users.\n",
"assert isinstance(validator, ge.validator.validator.Validator)\n",
"\n",
"# Here is a BatchRequest naming a data_asset\n",
"batch_request = BatchRequest(\n",
" datasource_name=\"taxi_datasource\",\n",
" data_connector_name=\"default_inferred_data_connector_name\",\n",
" data_asset_name=\"<YOUR_DATA_ASSET_NAME>\",\n",
")\n",
"\n",
"# Please note this override is only to provide good UX for docs and tests.\n",
"# In normal usage you'd set your data asset name directly in the BatchRequest above.\n",
"batch_request.data_asset_name = \"yellow_trip_data_sample_2019-01.csv\"\n",
"\n",
"context.create_expectation_suite(\n",
" expectation_suite_name=\"test_suite\", overwrite_existing=True\n",
")\n",
"validator = context.get_validator(\n",
" batch_request=batch_request, expectation_suite_name=\"test_suite\"\n",
")\n",
"print(validator.head())\n",
"\n",
"# NOTE: The following code is only for testing and can be ignored by users.\n",
"assert isinstance(validator, ge.validator.validator.Validator)\n",
"assert [ds[\"name\"] for ds in context.list_datasources()] == [\"taxi_datasource\"]\n",
"assert \"yellow_trip_data_sample_2018-01.csv\" in set(\n",
" context.get_available_data_asset_names()[\"taxi_datasource\"][\n",
" \"default_inferred_data_connector_name\"\n",
" ]\n",
")\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00909090909090909,
0.011904761904761904,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 90 | 0.000619 |
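Once a validator has been obtained as above, expectations can be added to the suite and persisted. A hedged follow-on sketch is shown below; the column name is an assumption about the sample taxi data, not taken from the snippet.

# Follow-on sketch: add a simple expectation and persist the suite.
validator.expect_column_values_to_not_be_null(column="passenger_count")  # assumed column
validator.save_expectation_suite(discard_failed_expectations=False)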
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Unitary gate.
"""
from ._instruction import Instruction
from ._quantumregister import QuantumRegister
from ._qiskiterror import QISKitError
class Gate(Instruction):
"""Unitary gate."""
def __init__(self, name, param, args, circuit=None):
"""Create a new composite gate.
name = instruction name string
param = list of real parameters
arg = list of pairs (Register, index)
circuit = QuantumCircuit or CompositeGate containing this gate
"""
for argument in args:
if not isinstance(argument[0], QuantumRegister):
raise QISKitError("argument not (QuantumRegister, int) "
+ "tuple")
super(Gate, self).__init__(name, param, args, circuit)
def inverse(self):
"""Invert this gate."""
raise QISKitError("inverse not implemented")
def q_if(self, *qregs):
"""Add controls to this gate."""
raise QISKitError("control not implemented")
| [
"# -*- coding: utf-8 -*-\r\n",
"\r\n",
"# Copyright 2017 IBM RESEARCH. All Rights Reserved.\r\n",
"#\r\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n",
"# you may not use this file except in compliance with the License.\r\n",
"# You may obtain a copy of the License at\r\n",
"#\r\n",
"# http://www.apache.org/licenses/LICENSE-2.0\r\n",
"#\r\n",
"# Unless required by applicable law or agreed to in writing, software\r\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n",
"# See the License for the specific language governing permissions and\r\n",
"# limitations under the License.\r\n",
"# =============================================================================\r\n",
"\r\n",
"\"\"\"\r\n",
"Unitary gate.\r\n",
"\"\"\"\r\n",
"from ._instruction import Instruction\r\n",
"from ._quantumregister import QuantumRegister\r\n",
"from ._qiskiterror import QISKitError\r\n",
"\r\n",
"\r\n",
"class Gate(Instruction):\r\n",
" \"\"\"Unitary gate.\"\"\"\r\n",
"\r\n",
" def __init__(self, name, param, args, circuit=None):\r\n",
" \"\"\"Create a new composite gate.\r\n",
"\r\n",
" name = instruction name string\r\n",
" param = list of real parameters\r\n",
" arg = list of pairs (Register, index)\r\n",
" circuit = QuantumCircuit or CompositeGate containing this gate\r\n",
" \"\"\"\r\n",
" for argument in args:\r\n",
" if not isinstance(argument[0], QuantumRegister):\r\n",
" raise QISKitError(\"argument not (QuantumRegister, int) \"\r\n",
" + \"tuple\")\r\n",
" super(Gate, self).__init__(name, param, args, circuit)\r\n",
"\r\n",
" def inverse(self):\r\n",
" \"\"\"Invert this gate.\"\"\"\r\n",
" raise QISKitError(\"inverse not implemented\")\r\n",
"\r\n",
" def q_if(self, *qregs):\r\n",
" \"\"\"Add controls to this gate.\"\"\"\r\n",
" raise QISKitError(\"control not implemented\")\r\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 49 | 0 |
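A minimal sketch of how the Gate base class above might be subclassed; the gate name and argument shape are illustrative only, and q_if is left unimplemented as in the parent.

# Illustrative sketch only: a self-inverse gate built on the Gate base class.
class SelfInverseGate(Gate):
    """Hypothetical self-inverse single-qubit gate."""

    def __init__(self, qubit, circuit=None):
        # qubit is a (QuantumRegister, index) pair, as required by Gate.__init__
        super(SelfInverseGate, self).__init__("self_inv", [], [qubit], circuit)

    def inverse(self):
        return self  # a self-inverse gate is its own inverse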
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import numpy as np
import tensorflow as tf
import predict_steering
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/home/sridhar/code/Challenge/Code_tfRecord/model_deg5/eval',
"""Directory where to write event logs.""")
tf.app.flags.DEFINE_string('eval_data', 'train_eval',
"""Either 'test' or 'train_eval'.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/home/sridhar/code/Challenge/Code_tfRecord/model_deg5/',
"""Directory where to read model checkpoints.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_integer('num_examples', 3812,
"""Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
def eval_once(saver, summary_writer, mse_op, logits_op, steering_angles_op, summary_op):
"""Run Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
mse: Top K op.
summary_op: Summary op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size_flag))
s_mse = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * FLAGS.batch_size_flag
step = 0
while step < num_iter and not coord.should_stop():
mse_predict, steering_predict, steering_angles = sess.run([mse_op, logits_op, steering_angles_op])
s_mse += np.sum(mse_predict)
step += 1
# Compute precision @ 1.
average_mse = s_mse / total_sample_count
print('%s: Average MSE = %.3f' % (datetime.now(), average_mse))
print('%s: Sum MSE = %.3f' % (datetime.now(), s_mse))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(tag='Average MSE', simple_value=average_mse)
summary_writer.add_summary(summary, global_step)
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate():
"""Eval for a number of steps."""
with tf.Graph().as_default() as g:
# Get images and labels
eval_data = FLAGS.eval_data == 'test'
images, steering_angles = predict_steering.inputs(eval_data=eval_data)
# Build a Graph that computes the logits predictions from the
# inference model.
logits = predict_steering.inference_nvidia(images)
# Calculate predictions.
mse = tf.reduce_mean(tf.square(tf.sub(logits, steering_angles)),name='mse')
#top_k_op = tf.nn.in_top_k(logits, steering_angles, 1)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
predict_steering.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
#tf.scalar_summary('steering_predict',logits)
tf.scalar_summary(mse.op.name,mse)
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)
while True:
eval_once(saver, summary_writer, mse, logits, steering_angles, summary_op)
if FLAGS.run_once:
break
#time.sleep(FLAGS.eval_interval_secs)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.eval_dir):
tf.gfile.DeleteRecursively(FLAGS.eval_dir)
tf.gfile.MakeDirs(FLAGS.eval_dir)
evaluate()
if __name__ == '__main__':
tf.app.run() | [
"from __future__ import absolute_import\n",
"from __future__ import division\n",
"from __future__ import print_function\n",
"\n",
"from datetime import datetime\n",
"import math\n",
"import time\n",
"\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"\n",
"import predict_steering\n",
"\n",
"FLAGS = tf.app.flags.FLAGS\n",
"\n",
"tf.app.flags.DEFINE_string('eval_dir', '/home/sridhar/code/Challenge/Code_tfRecord/model_deg5/eval',\n",
" \"\"\"Directory where to write event logs.\"\"\")\n",
"tf.app.flags.DEFINE_string('eval_data', 'train_eval',\n",
" \"\"\"Either 'test' or 'train_eval'.\"\"\")\n",
"tf.app.flags.DEFINE_string('checkpoint_dir', '/home/sridhar/code/Challenge/Code_tfRecord/model_deg5/',\n",
" \"\"\"Directory where to read model checkpoints.\"\"\")\n",
"tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,\n",
" \"\"\"How often to run the eval.\"\"\")\n",
"tf.app.flags.DEFINE_integer('num_examples', 3812,\n",
" \"\"\"Number of examples to run.\"\"\")\n",
"tf.app.flags.DEFINE_boolean('run_once', False,\n",
" \"\"\"Whether to run eval only once.\"\"\")\n",
"\n",
"\n",
"def eval_once(saver, summary_writer, mse_op, logits_op, steering_angles_op, summary_op):\n",
" \"\"\"Run Eval once.\n",
" Args:\n",
" saver: Saver.\n",
" summary_writer: Summary writer.\n",
" mse: Top K op.\n",
" summary_op: Summary op.\n",
" \"\"\"\n",
" with tf.Session() as sess:\n",
" ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n",
" if ckpt and ckpt.model_checkpoint_path:\n",
" # Restores from checkpoint\n",
" saver.restore(sess, ckpt.model_checkpoint_path)\n",
" # Assuming model_checkpoint_path looks something like:\n",
" # /my-favorite-path/model.ckpt-0,\n",
" # extract global_step from it.\n",
" global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n",
" else:\n",
" print('No checkpoint file found')\n",
" return\n",
"\n",
" # Start the queue runners.\n",
" coord = tf.train.Coordinator()\n",
" try:\n",
" threads = []\n",
" for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):\n",
" threads.extend(qr.create_threads(sess, coord=coord, daemon=True,\n",
" start=True))\n",
"\n",
" num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size_flag))\n",
" s_mse = 0 # Counts the number of correct predictions.\n",
" total_sample_count = num_iter * FLAGS.batch_size_flag\n",
" step = 0\n",
" while step < num_iter and not coord.should_stop():\n",
" mse_predict, steering_predict, steering_angles = sess.run([mse_op, logits_op, steering_angles_op])\n",
" s_mse += np.sum(mse_predict)\n",
" step += 1\n",
"\n",
" # Compute precision @ 1.\n",
" average_mse = s_mse / total_sample_count\n",
" print('%s: Average MSE = %.3f' % (datetime.now(), average_mse))\n",
" print('%s: Sum MSE = %.3f' % (datetime.now(), s_mse))\n",
"\n",
" summary = tf.Summary()\n",
" summary.ParseFromString(sess.run(summary_op))\n",
" summary.value.add(tag='Average MSE', simple_value=average_mse)\n",
" summary_writer.add_summary(summary, global_step)\n",
" except Exception as e: # pylint: disable=broad-except\n",
" coord.request_stop(e)\n",
"\n",
" coord.request_stop()\n",
" coord.join(threads, stop_grace_period_secs=10)\n",
"\n",
"\n",
"def evaluate():\n",
" \"\"\"Eval for a number of steps.\"\"\"\n",
" with tf.Graph().as_default() as g:\n",
" # Get images and labels\n",
" eval_data = FLAGS.eval_data == 'test'\n",
" images, steering_angles = predict_steering.inputs(eval_data=eval_data)\n",
"\n",
" # Build a Graph that computes the logits predictions from the\n",
" # inference model.\n",
" logits = predict_steering.inference_nvidia(images)\n",
"\n",
" # Calculate predictions.\n",
" mse = tf.reduce_mean(tf.square(tf.sub(logits, steering_angles)),name='mse')\n",
" #top_k_op = tf.nn.in_top_k(logits, steering_angles, 1)\n",
"\n",
" # Restore the moving average version of the learned variables for eval.\n",
" variable_averages = tf.train.ExponentialMovingAverage(\n",
" predict_steering.MOVING_AVERAGE_DECAY)\n",
" variables_to_restore = variable_averages.variables_to_restore()\n",
" saver = tf.train.Saver(variables_to_restore)\n",
"\n",
" # Build the summary operation based on the TF collection of Summaries.\n",
" #tf.scalar_summary('steering_predict',logits)\n",
" tf.scalar_summary(mse.op.name,mse)\n",
"\n",
" summary_op = tf.merge_all_summaries()\n",
"\n",
" summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)\n",
"\n",
" while True:\n",
" eval_once(saver, summary_writer, mse, logits, steering_angles, summary_op)\n",
" if FLAGS.run_once:\n",
" break\n",
" #time.sleep(FLAGS.eval_interval_secs)\n",
"\n",
"\n",
"def main(argv=None): # pylint: disable=unused-argument\n",
" if tf.gfile.Exists(FLAGS.eval_dir):\n",
" tf.gfile.DeleteRecursively(FLAGS.eval_dir)\n",
" tf.gfile.MakeDirs(FLAGS.eval_dir)\n",
" evaluate()\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" tf.app.run()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0.011235955056179775,
0.05,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0.030303030303030304,
0.018518518518518517,
0.01639344262295082,
0.023809523809523808,
0.02702702702702703,
0.012987012987012988,
0,
0.025,
0.07692307692307693,
0,
0,
0,
0,
0.05263157894736842,
0.015873015873015872,
0,
0,
0,
0.013157894736842105,
0.01639344262295082,
0.016666666666666666,
0.06666666666666667,
0.017543859649122806,
0.009345794392523364,
0,
0,
0,
0.03225806451612903,
0.02127659574468085,
0.014285714285714285,
0.016666666666666666,
0,
0.034482758620689655,
0.019230769230769232,
0.014492753623188406,
0.01818181818181818,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0125,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0.02,
0.02564102564102564,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0.04,
0,
0.045454545454545456,
0,
0,
0,
0.02631578947368421,
0,
0.027777777777777776,
0.07692307692307693,
0,
0,
0,
0.14285714285714285
] | 128 | 0.010086 |
# __init__.py
"""
pygaero
---------
Python tools for the processing of data obtained from HR-ToF-CIMS, with some functions
being specifically designed for data obtained using the Filter Inlet for Gases and
Aerosols (FigAERO) inlet. Time series data is handled by pandas DataFrames, with data
imported from csv files. Some functions will work for any generic numerical time series.
Note: This package is designed around handling time series data within pandas DataFrames, but
may be extended to other formats (e.g., numpy.ndarray). However, thorough testing has not been done
in this respect.
Submodules
-------------
pio
Module containing i/o functions for data import and export.
therm
Module containing functions designed for thermogram time series analysis, with a focus on
peak signal detection during thermograms (Tmax).
gen_chem
Module containing general chemistry functions to handle chemical formula names, elemental analysis
of formulas, etc.
clustering
Module containing functions for clustering analysis of ToF-CIMS data. Examples of usable features
from CIMS data are: # of carbon, # of oxygen, O/C ratios, Molecular Weights, Oxidation states, etc.
Preparation of features for clustering can be performed by using the 'concat_df_ls' function in
gen_chem.py to concatenate TMax stats and elemental parameters (O, C, O/C, N, etc.).
Many of the functions here, particularly those intended for preprocessing/cleaning of feature
data could be used when implementing other supervised/unsupervised machine learning methods.
"""
import os
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
data_path = os.path.join(dir_path, 'elements.csv')
table_of_elements = {}
with open(data_path) as f:
for line in f.readlines():
line = line.strip().split(',')
table_of_elements[line[0]] = float(line[1])
__version__ = "pygaero-1.50"
__all__ = ['clustering', 'gen_chem', 'pio', 'therm', 'table_of_elements']
| [
"# __init__.py\n",
"\"\"\"\n",
"pygaero\n",
"---------\n",
" Python tools for the processing of data obtained from HR-ToF-CIMS, with some functions\n",
" being specifically designed for data obtained using the Filter Inlet for Gases and\n",
" Aerosols (FigAERO) inlet. Time series data is handled by pandas DataFrames, with data\n",
" imported from csv files. Some functions will work for any generic numerical time series.\n",
" Note: This package is designed around handling time series data within pandas DataFrames, but\n",
" may be extended to other formats (e.g., numpy.ndarray). However, thorough testing has been done\n",
" in this respect.\"\n",
"\n",
"Submodules\n",
"-------------\n",
"pio\n",
" Module containing i/o functions for data import and export.\n",
"\n",
"therm\n",
" Module containing functions designed for thermogram time series analysis, with a focus on\n",
" peak signal detection during thermograms (Tmax).\n",
"\n",
"gen_chem\n",
" Module containing general chemistry functions to handle chemical formula names, elemental analysis\n",
" of formulas, etc.\n",
"\n",
"clustering\n",
" Module containing functions for clustering analysis of ToF-CIMS data. Examples of usable features\n",
" from CIMS data are: # of carbon, # of oxygen, O/C ratios, Molecular Weights, Oxidation states, etc.\n",
" Preparation of features for clustering can be performed by using the 'concat_df_ls' function in\n",
" gen_chem.py to concatenate TMax stats and elemental parameters (O, C, O/C, N, etc.).\n",
" Many of the functions here, particularly those intended for preprocessing/cleaning of feature\n",
" data could be used when implementing other supervised/unsupervised machine learning methods.\n",
"\n",
"\"\"\"\n",
"import os\n",
"path = os.path.abspath(__file__)\n",
"dir_path = os.path.dirname(path)\n",
"data_path = os.path.join(dir_path, 'elements.csv')\n",
"table_of_elements = {}\n",
"\n",
"with open(data_path) as f:\n",
" for line in f.readlines():\n",
" line = line.strip().split(',')\n",
" table_of_elements[line[0]] = float(line[1])\n",
"\n",
"\n",
"__version__ = \"pygaero-1.50\"\n",
"__all__ = ['clustering', 'gen_chem', 'pio', 'therm', 'table_of_elements']\n"
] | [
0,
0,
0,
0,
0.010526315789473684,
0.011494252873563218,
0.011111111111111112,
0.010752688172043012,
0.01020408163265306,
0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0.009345794392523364,
0,
0,
0,
0.009433962264150943,
0.009615384615384616,
0.01,
0.011235955056179775,
0.01020408163265306,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 48 | 0.003009 |
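A short usage sketch of the module-level element table built above; the element symbols are assumed to be the keys present in elements.csv.

# Usage sketch: table_of_elements maps an element symbol to its mass from elements.csv.
from pygaero import table_of_elements

# e.g. molecular weight of CO2, assuming 'C' and 'O' appear in the table
mw_co2 = table_of_elements['C'] + 2 * table_of_elements['O']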
from dispel4py.base import IterativePE
class SeismoStreamPE(IterativePE):
INPUT_NAME = IterativePE.INPUT_NAME
OUTPUT_NAME = IterativePE.OUTPUT_NAME
def __init__(self, compute_fn, params={}):
IterativePE.__init__(self)
self.compute_fn = compute_fn
self.params = params
def _reset(self):
self._inputs = None
self._input_tuple = None
self._data = None
self._metadata = None
self._timestamp = None
self._location = None
def _assign(self, inputs):
self._inputs = inputs[SeismoStreamPE.INPUT_NAME]
self._input_tuple = self._inputs[2:]
self._data = [ d.data for d in self._input_tuple ]
self._metadata = [ d.metadata for d in self._input_tuple ]
self._timestamp = self._inputs[0]
self._location = self._inputs[1]
self._output_tuple = []
def get_data(self):
return self._input_tuple
def write_data(self, data, metadata=None):
output = [self._timestamp, self._location, DataTuple(data, metadata)]
self.write(SeismoStreamPE.OUTPUT_NAME, output)
def process(self, inputs):
try:
self._assign(inputs)
result = self.compute_fn(self, **self.params)
if result:
self.write_data(result)
finally:
self._reset()
class DataTuple(object):
def __init__(self, data=None, metadata=None):
self.data = data
self.metadata = metadata
from dispel4py.workflow_graph import WorkflowGraph
def create_pipeline(chain, name_prefix='SeismoStreamPE_', name_suffix=''):
'''
Creates a composite PE wrapping a pipeline that processes obspy streams.
:param chain: list of functions that process obspy streams. The function takes one input parameter, stream, and returns an output stream.
:param requestId: id of the request that the stream is associated with
:param controlParameters: environment parameters for the processing elements
:rtype: dictionary inputs and outputs of the composite PE that was created
'''
prev = None
first = None
graph = WorkflowGraph()
for fn_desc in chain:
try:
fn = fn_desc[0]
params = fn_desc[1]
except TypeError:
fn = fn_desc
params = {}
pe = SeismoStreamPE(fn, params)
pe.name = name_prefix + fn.__name__ + name_suffix
if prev:
graph.connect(prev, SeismoStreamPE.OUTPUT_NAME, pe, SeismoStreamPE.INPUT_NAME)
else:
first = pe
prev = pe
# Map inputs and outputs of the wrapper to the nodes in the subgraph
graph.inputmappings = { 'input' : (first, SeismoStreamPE.INPUT_NAME) }
graph.outputmappings = { 'output' : (prev, SeismoStreamPE.OUTPUT_NAME) }
return graph
| [
"from dispel4py.base import IterativePE\n",
"\n",
"class SeismoStreamPE(IterativePE):\n",
" \n",
" INPUT_NAME = IterativePE.INPUT_NAME\n",
" OUTPUT_NAME = IterativePE.OUTPUT_NAME\n",
" \n",
" def __init__(self, compute_fn, params={}):\n",
" IterativePE.__init__(self)\n",
" self.compute_fn = compute_fn\n",
" self.params = params\n",
" \n",
" def _reset(self):\n",
" self._inputs = None\n",
" self._input_tuple = None\n",
" self._data = None\n",
" self._metadata = None\n",
" self._timestamp = None\n",
" self._location = None\n",
" \n",
" def _assign(self, inputs):\n",
" self._inputs = inputs[SeismoStreamPE.INPUT_NAME]\n",
" self._input_tuple = self._inputs[2:]\n",
" self._data = [ d.data for d in self._input_data ]\n",
" self._metadata = [ d.metadata for d in self._input_data ]\n",
" self._timestamp = self._inputs[0]\n",
" self._location = self._inputs[1]\n",
" self._output_tuple = []\n",
" \n",
" def get_data(self):\n",
" return self._input_data\n",
" \n",
" def write_data(self, data, metadata=None):\n",
" output = [self._timestamp, self._location, DataTuple(data, metadata)]\n",
" self.write(SeismoStreamPE.OUTPUT_NAME, output)\n",
" \n",
" def process(self, inputs):\n",
" try:\n",
" self._assign(inputs)\n",
" result = compute_fn(self, **params)\n",
" if result:\n",
" self.write_data(result)\n",
" finally:\n",
" self._reset()\n",
" \n",
"class DataTuple(object):\n",
" def __init__(self, data=None, metadata=None):\n",
" self.data = data\n",
" self.metadata = metadata\n",
"\n",
"from dispel4py.workflow_graph import WorkflowGraph\n",
"\n",
"def create_pipeline(chain, name_prefix='SeismoStreamPE_', name_suffix=''):\n",
" '''\n",
" Creates a composite PE wrapping a pipeline that processes obspy streams.\n",
" :param chain: list of functions that process obspy streams. The function takes one input parameter, stream, and returns an output stream.\n",
" :param requestId: id of the request that the stream is associated with\n",
" :param controlParameters: environment parameters for the processing elements\n",
" :rtype: dictionary inputs and outputs of the composite PE that was created\n",
" '''\n",
" prev = None\n",
" first = None\n",
" graph = WorkflowGraph()\n",
" \n",
" for fn_desc in chain:\n",
" try:\n",
" \tfn = fn_desc[0]\n",
" \tparams = fn_desc[1]\n",
" except TypeError:\n",
" fn = fn_desc\n",
" params = {}\n",
" \n",
" pe = SeismoStreamPE(fn, params)\n",
" pe.name = name_prefix + fn.__name__ + name_suffix\n",
" \n",
" if prev:\n",
" graph.connect(prev, SeismoStreamPE.OUTPUT_NAME, pe, SeismoStreamPE.INPUT_NAME)\n",
" else:\n",
" first = pe\n",
" prev = pe\n",
" \n",
" # Map inputs and outputs of the wrapper to the nodes in the subgraph\n",
" graph.inputmappings = { 'input' : (first, INPUT_NAME) }\n",
" graph.outputmappings = { 'output' : (prev, OUTPUT_NAME) }\n",
" \n",
" return graph\n",
"\n"
] | [
0,
0,
0.02857142857142857,
0.2,
0,
0,
0.2,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.034482758620689655,
0.030303030303030304,
0,
0,
0,
0.1111111111111111,
0,
0,
0.07692307692307693,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.04,
0,
0,
0,
0,
0.0392156862745098,
0,
0.013333333333333334,
0,
0,
0.007042253521126761,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0.2,
0,
0,
0.12,
0.06896551724137931,
0,
0,
0,
0.07692307692307693,
0,
0,
0.1111111111111111,
0,
0.01098901098901099,
0,
0,
0,
0.07692307692307693,
0,
0.06451612903225806,
0.04838709677419355,
0.2,
0,
1
] | 87 | 0.036568 |
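The create_pipeline docstring above describes a chain of stream-processing functions; the sketch below shows the two accepted chain-entry forms (a bare function, or a (function, params) pair). The processing functions are pass-through placeholders, not real obspy filters.

# Usage sketch: each chain entry is either a function or a (function, params) pair.
# A processing function receives the SeismoStreamPE instance plus its params.
def detrend(pe):
    pe.write_data(pe.get_data())            # pass-through placeholder

def bandpass(pe, freqmin=0.1, freqmax=1.0):
    pe.write_data(pe.get_data())            # pass-through placeholder

graph = create_pipeline([detrend, (bandpass, {'freqmin': 0.05, 'freqmax': 2.0})])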
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from ast import literal_eval
import pytz
from werkzeug import abort, redirect
from wtforms import Form, TextField, PasswordField, validators
from nereid import jsonify, flash, render_template, url_for, cache
from nereid.globals import session, request
from nereid.helpers import login_required, key_from_list, get_flashed_messages
from nereid.signals import login, failed_login, logout
from trytond.model import ModelView, ModelSQL, fields
from trytond.backend import TableHandler
from trytond.transaction import Transaction
from trytond.pool import Pool
from .i18n import _
__all__ = ['URLMap', 'WebSite', 'URLRule', 'URLRuleDefaults',
'WebsiteCountry', 'WebsiteCurrency']
class URLMap(ModelSQL, ModelView):
"""
URL Map
~~~~~~~
A collection of URLs for a website. This is analogous to werkzeug's
URL Map.
:param name: Name of the URL Map
:param default_subdomain: Default subdomain for URLs in this Map
:param active: Whether the URL Map is active or not.
Rules:
~~~~~~
:param rules: O2M URLRules
Advanced:
~~~~~~~~~
:param charset: default value - utf-8
:param strict_slashes: Boolean field if / in url map is taken seriously
:param unique_urls: Enable `redirect_defaults` in the URL Map and
redirects the defaults to the URL
"""
__name__ = "nereid.url_map"
name = fields.Char(
'Name', required=True, select=True,
)
default_subdomain = fields.Char(
'Default Subdomain',
)
rules = fields.One2Many(
'nereid.url_rule',
'url_map',
'Rules'
)
charset = fields.Char('Char Set')
strict_slashes = fields.Boolean('Strict Slashes')
unique_urls = fields.Boolean('Unique URLs')
active = fields.Boolean('Active')
@staticmethod
def default_active():
"By default URL is active"
return True
@staticmethod
def default_charset():
"By default characterset is utf-8"
return 'utf-8'
def get_rules_arguments(self):
"""
Constructs a list of dictionary of arguments needed
for URL Rule construction. A wrapper around the
URL RULE get_rule_arguments
"""
rule_args = [ ]
for rule in self.rules:
rule_args.append(rule.get_rule_arguments())
return rule_args
class LoginForm(Form):
"Default Login Form"
email = TextField(_('e-mail'), [validators.Required(), validators.Email()])
password = PasswordField(_('Password'), [validators.Required()])
class WebSite(ModelSQL, ModelView):
"""
One of the most powerful features of Nereid is the ability to
manage multiple websites from one back-end. A web site in nereid
represents a collection or URLs, settings.
:param name: Name of the web site
:param base_url: The unique URL of the website, You cannot have two
websites, with the same base_url
:param url_map: The active URL Map for the website (M2O URLMap)
:param company: The company linked with the website.
:param active: Whether the website is active or not.
"""
__name__ = "nereid.website"
#: The name field is used for both information and also as
#: the site identifier for nereid. The WSGI application requires
#: SITE argument. The SITE argument is then used to load URLs and
#: other settings for the website. Needs to be unique
name = fields.Char('Name', required=True, select=True)
#: The URLMap is made as a different object which functions as a
#: collection of Rules. This will allow easy replication of sites
#: which perform with same URL structures but different templates
url_map = fields.Many2One('nereid.url_map', 'URL Map', required=True)
#: The company to which the website belongs. Useful when creating
#: records like sale order which require a company to be present
company = fields.Many2One('company.company', 'Company', required=True)
active = fields.Boolean('Active')
#: The list of countries this website operates in. Used for generating
#: Countries list in the registration form etc.
countries = fields.Many2Many(
'nereid.website-country.country', 'website', 'country',
'Countries Available')
#: Allowed currencies in the website
currencies = fields.Many2Many(
'nereid.website-currency.currency',
'website', 'currency', 'Currencies Available')
#: Default language
default_language = fields.Many2One('ir.lang', 'Default Language',
required=True)
#: The res.user with which the nereid application will be loaded
#: .. versionadded: 0.3
application_user = fields.Many2One(
'res.user', 'Application User', required=True
)
guest_user = fields.Many2One(
'nereid.user', 'Guest user', required=True
)
timezone = fields.Selection(
[(x, x) for x in pytz.common_timezones], 'Timezone', translate=False
)
@staticmethod
def default_timezone():
return 'UTC'
@staticmethod
def default_active():
return True
@classmethod
def __setup__(cls):
super(WebSite, cls).__setup__()
cls._sql_constraints = [
('name_uniq', 'UNIQUE(name)',
'Another site with the same name already exists!')
]
@classmethod
def country_list(cls):
"""
Return the list of countries in JSON
"""
return jsonify(result = [
{'key': c.id, 'value': c.name} \
for c in request.nereid_website.countries
])
@staticmethod
def subdivision_list():
"""
Return the list of states for given country
"""
country = int(request.args.get('country', 0))
if country not in [c.id for c in request.nereid_website.countries]:
abort(404)
Subdivision = Pool().get('country.subdivision')
subdivisions = Subdivision.search([('country', '=', country)])
return jsonify(
result = [{
'id': s.id,
'name': s.name,
'code': s.code,
} for s in subdivisions
]
)
def get_urls(self, name):
"""
Return complete list of URLs
"""
URLMap = Pool().get('nereid.url_map')
websites = self.search([('name', '=', name)])
if not websites:
raise RuntimeError("Website with Name %s not found" % name)
return URLMap.get_rules_arguments(websites[0].url_map.id)
def stats(self, **arguments):
"""
Test method.
"""
return u'Request: %s\nArguments: %s\nEnviron: %s\n' \
% (request, arguments, request.environ)
@classmethod
def home(cls):
"A dummy home method which just renders home.jinja"
return render_template('home.jinja')
@classmethod
def login(cls):
"""
Simple login based on the email and password
Required post data see :class:LoginForm
"""
login_form = LoginForm(request.form)
if not request.is_guest_user and request.args.get('next'):
return redirect(request.args['next'])
if request.method == 'POST' and login_form.validate():
NereidUser = Pool().get('nereid.user')
result = NereidUser.authenticate(
login_form.email.data, login_form.password.data
)
# Result can be the following:
# 1 - Browse record of User (successful login)
# 2 - None - Login failure without message
# 3 - Any other false value (no message is shown. useful if you
# want to handle the message shown to user)
if result:
# NOTE: Translators leave %s as such
flash(_("You are now logged in. Welcome %(name)s",
name=result.display_name))
session['user'] = result.id
login.send()
if request.is_xhr:
return 'OK'
else:
return redirect(
request.values.get(
'next', url_for('nereid.website.home')
)
)
elif result is None:
flash(_("Invalid login credentials"))
failed_login.send(form=login_form)
if request.is_xhr:
return 'NOK'
return render_template('login.jinja', login_form=login_form)
@classmethod
def logout(cls):
"Log the user out"
session.pop('user', None)
logout.send()
flash(
_('You have been logged out successfully. Thanks for visiting us')
)
return redirect(
request.args.get('next', url_for('nereid.website.home'))
)
@staticmethod
def account_context():
"""This fills the account context for the template
rendering my account. Additional modules might want to fill extra
data into the context
"""
return dict(
user = request.nereid_user,
party = request.nereid_user.party,
)
@classmethod
@login_required
def account(cls):
return render_template('account.jinja', **cls.account_context())
def get_currencies(self):
"""Returns available currencies for current site
.. note::
            A special method is required so that the fetch can be sped up
            by pushing the currencies to the central cache, which cannot be
            done directly on a browse node.
"""
cache_key = key_from_list([
Transaction().cursor.dbname,
Transaction().user,
'nereid.website.get_currencies',
])
# The website is automatically appended to the cache prefix
rv = cache.get(cache_key)
if rv is None:
rv = [{
'id': c.id,
'name': c.name,
'symbol': c.symbol,
} for c in self.currencies]
cache.set(cache_key, rv, 60*60)
return rv
@staticmethod
def _user_status():
"""Returns the commonly required status parameters of the user
This method could be inherited and components could be added
"""
rv = {
'messages': get_flashed_messages()
}
if request.is_guest_user:
rv.update({
                'logged_in': False
})
else:
rv.update({
'logged_in': True,
'name': request.nereid_user.display_name
})
return rv
@classmethod
def user_status(cls):
"""
Returns a JSON of the user_status
"""
return jsonify(status=cls._user_status())
class URLRule(ModelSQL, ModelView):
"""
URL Rule
~~~~~~~~
A rule that represents a single URL pattern
:param path: Path of the URL
:param name: Name of the URL. This is used for reverse mapping, hence
needs to be unique
:param handler: The handler of this URL or the target model.method
which is called. The representation is::
<model>.<method>
For example: To call list_parties method in party.party use:
party.party.list_parties
The signature of the method being called should be:
def method(self, **arguments):
return "Hello World"
where request is the request object and arguments is the dictionary
of the values generated from the match of the URL
    :param active: Whether the URL rule is active or not.
Advanced
~~~~~~~~~
:param defaults: Defaults of the URL (O2M - URLRuleDefaults)
:param method: POST, GET,
:param only_for_generation: URL will not be mapped, but can be used
for URL generation. Example for static pages, where content
delivery is managed by apache, but URL generation is necessary
:param redirect_to: (M2O self) Another URL to which the redirect has to
be done
:param sequence: Numeric sequence of the URL Map.
:param url_map: Relation field for url_rule o2m
"""
__name__ = "nereid.url_rule"
_rec_name = 'rule'
rule = fields.Char('Rule', required=True, select=True,)
endpoint = fields.Char('Endpoint', select=True,)
active = fields.Boolean('Active')
defaults = fields.One2Many('nereid.url_rule_defaults', 'rule', 'Defaults')
# Supported HTTP methods
http_method_get = fields.Boolean('GET')
http_method_post = fields.Boolean('POST')
http_method_patch = fields.Boolean('PATCH')
http_method_put = fields.Boolean('PUT')
http_method_delete = fields.Boolean('DELETE')
only_for_genaration = fields.Boolean('Only for Generation')
redirect_to = fields.Char('Redirect To')
sequence = fields.Integer('Sequence', required=True,)
url_map = fields.Many2One('nereid.url_map', 'URL Map')
@classmethod
def __setup__(cls):
super(URLRule, cls).__setup__()
cls._order.insert(0, ('sequence', 'ASC'))
@staticmethod
def default_active():
return True
@staticmethod
def default_http_method_get():
return True
def get_http_methods(self):
"""
Returns an iterable of HTTP methods that the URL has to support.
.. versionadded: 2.4.0.6
"""
methods = []
if self.http_method_get:
methods.append('GET')
if self.http_method_post:
methods.append('POST')
if self.http_method_put:
methods.append('PUT')
if self.http_method_delete:
methods.append('DELETE')
if self.http_method_patch:
methods.append('PATCH')
return methods
def get_rule_arguments(self):
"""
Return the arguments of a Rule in the corresponding format
"""
defaults = dict(
[(i.key, i.value) for i in self.defaults]
)
return {
'rule': self.rule,
'endpoint': self.endpoint,
'methods': self.get_http_methods(),
'build_only': self.only_for_genaration,
'defaults': defaults,
'redirect_to': self.redirect_to or None,
}
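# Illustration only: one way a consumer could turn the dictionaries produced by
# URLMap.get_rules_arguments() / URLRule.get_rule_arguments() above into a werkzeug
# URL map. `build_url_map` is a hypothetical helper, not part of this module, and
# nereid's actual WSGI glue may differ.
def build_url_map(rule_args):
    from werkzeug.routing import Map, Rule
    rules = []
    for args in rule_args:
        args = dict(args)            # work on a copy of the rule's arguments
        pattern = args.pop('rule')   # Rule() takes the URL pattern positionally
        rules.append(Rule(pattern, **args))
    return Map(rules)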
class URLRuleDefaults(ModelSQL, ModelView):
"""
Defaults for the URL
:param key: The char for the default's key
:param value: The Value for the default's Value
:param Rule: M2O Rule
"""
__name__ = "nereid.url_rule_defaults"
_rec_name = 'key'
key = fields.Char('Key', required=True, select=True)
value = fields.Char('Value', required=True, select=True)
rule = fields.Many2One('nereid.url_rule', 'Rule', required=True,
select=True)
class WebsiteCountry(ModelSQL):
"Website Country Relations"
__name__ = 'nereid.website-country.country'
website = fields.Many2One('nereid.website', 'Website')
country = fields.Many2One('country.country', 'Country')
class WebsiteCurrency(ModelSQL):
"Currencies to be made available on website"
__name__ = 'nereid.website-currency.currency'
_table = 'website_currency_rel'
website = fields.Many2One(
'nereid.website', 'Website',
ondelete='CASCADE', select=1, required=True)
currency = fields.Many2One(
'currency.currency', 'Currency',
ondelete='CASCADE', select=1, required=True)
| [
"# This file is part of Tryton. The COPYRIGHT file at the top level of\n",
"# this repository contains the full copyright notices and license terms.\n",
"from ast import literal_eval\n",
"\n",
"import pytz\n",
"from werkzeug import abort, redirect\n",
"from wtforms import Form, TextField, PasswordField, validators\n",
"\n",
"from nereid import jsonify, flash, render_template, url_for, cache\n",
"from nereid.globals import session, request\n",
"from nereid.helpers import login_required, key_from_list, get_flashed_messages\n",
"from nereid.signals import login, failed_login, logout\n",
"from trytond.model import ModelView, ModelSQL, fields\n",
"from trytond.backend import TableHandler\n",
"from trytond.transaction import Transaction\n",
"from trytond.pool import Pool\n",
"\n",
"from .i18n import _\n",
"\n",
"__all__ = ['URLMap', 'WebSite', 'URLRule', 'URLRuleDefaults',\n",
" 'WebsiteCountry', 'WebsiteCurrency']\n",
"\n",
"\n",
"class URLMap(ModelSQL, ModelView):\n",
" \"\"\"\n",
" URL Map\n",
" ~~~~~~~\n",
"\n",
" A collection of URLs for a website. This is analogous to werkzeug's\n",
" URL Map.\n",
"\n",
" :param name: Name of the URL Map\n",
" :param default_subdomain: Default subdomain for URLs in this Map\n",
" :param active: Whether the URL Map is active or not.\n",
"\n",
" Rules:\n",
" ~~~~~~\n",
" :param rules: O2M URLRules\n",
"\n",
" Advanced:\n",
" ~~~~~~~~~\n",
" :param charset: default value - utf-8\n",
" :param strict_slashes: Boolean field if / in url map is taken seriously\n",
" :param unique_urls: Enable `redirect_defaults` in the URL Map and\n",
" redirects the defaults to the URL \n",
" \"\"\"\n",
" __name__ = \"nereid.url_map\"\n",
"\n",
" name = fields.Char(\n",
" 'Name', required=True, select=True,\n",
" )\n",
" default_subdomain = fields.Char(\n",
" 'Default Subdomain',\n",
" )\n",
" rules = fields.One2Many(\n",
" 'nereid.url_rule',\n",
" 'url_map',\n",
" 'Rules'\n",
" )\n",
" charset = fields.Char('Char Set')\n",
" strict_slashes = fields.Boolean('Strict Slashes')\n",
" unique_urls = fields.Boolean('Unique URLs')\n",
" active = fields.Boolean('Active')\n",
"\n",
" @staticmethod\n",
" def default_active():\n",
" \"By default URL is active\"\n",
" return True\n",
"\n",
" @staticmethod\n",
" def default_charset():\n",
" \"By default characterset is utf-8\"\n",
" return 'utf-8'\n",
"\n",
" def get_rules_arguments(self):\n",
" \"\"\"\n",
" Constructs a list of dictionary of arguments needed\n",
" for URL Rule construction. A wrapper around the \n",
" URL RULE get_rule_arguments\n",
" \"\"\"\n",
" rule_args = [ ]\n",
" for rule in self.rules:\n",
" rule_args.append(rule.get_rule_arguments())\n",
" return rule_args\n",
"\n",
"\n",
"class LoginForm(Form):\n",
" \"Default Login Form\"\n",
" email = TextField(_('e-mail'), [validators.Required(), validators.Email()])\n",
" password = PasswordField(_('Password'), [validators.Required()])\n",
"\n",
"\n",
"class WebSite(ModelSQL, ModelView):\n",
" \"\"\"\n",
" One of the most powerful features of Nereid is the ability to \n",
" manage multiple websites from one back-end. A web site in nereid \n",
" represents a collection or URLs, settings.\n",
"\n",
" :param name: Name of the web site\n",
" :param base_url: The unique URL of the website, You cannot have two\n",
" websites, with the same base_url\n",
" :param url_map: The active URL Map for the website (M2O URLMap)\n",
" :param company: The company linked with the website.\n",
" :param active: Whether the website is active or not.\n",
"\n",
" \"\"\"\n",
" __name__ = \"nereid.website\"\n",
"\n",
" #: The name field is used for both information and also as\n",
" #: the site identifier for nereid. The WSGI application requires\n",
" #: SITE argument. The SITE argument is then used to load URLs and\n",
" #: other settings for the website. Needs to be unique\n",
" name = fields.Char('Name', required=True, select=True)\n",
"\n",
" #: The URLMap is made as a different object which functions as a \n",
" #: collection of Rules. This will allow easy replication of sites\n",
" #: which perform with same URL structures but different templates\n",
" url_map = fields.Many2One('nereid.url_map', 'URL Map', required=True)\n",
"\n",
" #: The company to which the website belongs. Useful when creating\n",
" #: records like sale order which require a company to be present\n",
" company = fields.Many2One('company.company', 'Company', required=True)\n",
"\n",
" active = fields.Boolean('Active')\n",
"\n",
" #: The list of countries this website operates in. Used for generating\n",
" #: Countries list in the registration form etc.\n",
" countries = fields.Many2Many(\n",
" 'nereid.website-country.country', 'website', 'country',\n",
" 'Countries Available')\n",
"\n",
" #: Allowed currencies in the website\n",
" currencies = fields.Many2Many(\n",
" 'nereid.website-currency.currency',\n",
" 'website', 'currency', 'Currencies Available')\n",
"\n",
" #: Default language\n",
" default_language = fields.Many2One('ir.lang', 'Default Language',\n",
" required=True)\n",
"\n",
" #: The res.user with which the nereid application will be loaded\n",
" #: .. versionadded: 0.3\n",
" application_user = fields.Many2One(\n",
" 'res.user', 'Application User', required=True\n",
" )\n",
" guest_user = fields.Many2One(\n",
" 'nereid.user', 'Guest user', required=True\n",
" )\n",
"\n",
" timezone = fields.Selection(\n",
" [(x, x) for x in pytz.common_timezones], 'Timezone', translate=False\n",
" )\n",
"\n",
" @staticmethod\n",
" def default_timezone():\n",
" return 'UTC'\n",
"\n",
" @staticmethod\n",
" def default_active():\n",
" return True\n",
"\n",
" @classmethod\n",
" def __setup__(cls):\n",
" super(WebSite, cls).__setup__()\n",
" cls._sql_constraints = [\n",
" ('name_uniq', 'UNIQUE(name)',\n",
" 'Another site with the same name already exists!')\n",
" ]\n",
"\n",
" @classmethod\n",
" def country_list(cls):\n",
" \"\"\"\n",
" Return the list of countries in JSON\n",
" \"\"\"\n",
" return jsonify(result = [\n",
" {'key': c.id, 'value': c.name} \\\n",
" for c in request.nereid_website.countries\n",
" ])\n",
"\n",
" @staticmethod\n",
" def subdivision_list():\n",
" \"\"\"\n",
" Return the list of states for given country\n",
" \"\"\"\n",
" country = int(request.args.get('country', 0))\n",
" if country not in [c.id for c in request.nereid_website.countries]:\n",
" abort(404)\n",
"\n",
" Subdivision = Pool().get('country.subdivision')\n",
" subdivisions = Subdivision.search([('country', '=', country)])\n",
" return jsonify(\n",
" result = [{\n",
" 'id': s.id,\n",
" 'name': s.name,\n",
" 'code': s.code,\n",
" } for s in subdivisions\n",
" ]\n",
" )\n",
"\n",
" def get_urls(self, name):\n",
" \"\"\"\n",
" Return complete list of URLs\n",
" \"\"\"\n",
" URLMap = Pool().get('nereid.url_map')\n",
" websites = self.search([('name', '=', name)])\n",
" if not websites:\n",
" raise RuntimeError(\"Website with Name %s not found\" % name)\n",
"\n",
" return URLMap.get_rules_arguments(websites[0].url_map.id)\n",
"\n",
" def stats(self, **arguments):\n",
" \"\"\"\n",
" Test method.\n",
" \"\"\"\n",
" return u'Request: %s\\nArguments: %s\\nEnviron: %s\\n' \\\n",
" % (request, arguments, request.environ)\n",
"\n",
" @classmethod\n",
" def home(cls):\n",
" \"A dummy home method which just renders home.jinja\"\n",
" return render_template('home.jinja')\n",
"\n",
" @classmethod\n",
" def login(cls):\n",
" \"\"\"\n",
" Simple login based on the email and password\n",
"\n",
" Required post data see :class:LoginForm\n",
" \"\"\"\n",
" login_form = LoginForm(request.form)\n",
"\n",
" if not request.is_guest_user and request.args.get('next'):\n",
" return redirect(request.args['next'])\n",
"\n",
" if request.method == 'POST' and login_form.validate():\n",
" NereidUser = Pool().get('nereid.user')\n",
" result = NereidUser.authenticate(\n",
" login_form.email.data, login_form.password.data\n",
" )\n",
" # Result can be the following:\n",
" # 1 - Browse record of User (successful login)\n",
" # 2 - None - Login failure without message\n",
" # 3 - Any other false value (no message is shown. useful if you \n",
" # want to handle the message shown to user)\n",
" if result:\n",
" # NOTE: Translators leave %s as such\n",
" flash(_(\"You are now logged in. Welcome %(name)s\",\n",
" name=result.display_name))\n",
" session['user'] = result.id\n",
" login.send()\n",
" if request.is_xhr:\n",
" return 'OK'\n",
" else:\n",
" return redirect(\n",
" request.values.get(\n",
" 'next', url_for('nereid.website.home')\n",
" )\n",
" )\n",
" elif result is None:\n",
" flash(_(\"Invalid login credentials\"))\n",
"\n",
" failed_login.send(form=login_form)\n",
"\n",
" if request.is_xhr:\n",
" return 'NOK'\n",
"\n",
" return render_template('login.jinja', login_form=login_form)\n",
"\n",
" @classmethod\n",
" def logout(cls):\n",
" \"Log the user out\"\n",
" session.pop('user', None)\n",
" logout.send()\n",
" flash(\n",
" _('You have been logged out successfully. Thanks for visiting us')\n",
" )\n",
" return redirect(\n",
" request.args.get('next', url_for('nereid.website.home'))\n",
" )\n",
"\n",
" @staticmethod\n",
" def account_context():\n",
" \"\"\"This fills the account context for the template\n",
" rendering my account. Additional modules might want to fill extra\n",
" data into the context\n",
" \"\"\"\n",
" return dict(\n",
" user = request.nereid_user,\n",
" party = request.nereid_user.party,\n",
" )\n",
"\n",
" @classmethod\n",
" @login_required\n",
" def account(cls):\n",
" return render_template('account.jinja', **cls.account_context())\n",
"\n",
" def get_currencies(self):\n",
" \"\"\"Returns available currencies for current site\n",
"\n",
" .. note::\n",
" A special method is required so that the fetch can be speeded up, \n",
" by pushing the categories to the central cache which cannot be \n",
" done directly on a browse node.\n",
" \"\"\"\n",
" cache_key = key_from_list([\n",
" Transaction().cursor.dbname,\n",
" Transaction().user,\n",
" 'nereid.website.get_currencies',\n",
" ])\n",
" # The website is automatically appended to the cache prefix\n",
" rv = cache.get(cache_key)\n",
" if rv is None:\n",
" rv = [{\n",
" 'id': c.id,\n",
" 'name': c.name,\n",
" 'symbol': c.symbol,\n",
" } for c in self.currencies]\n",
" cache.set(cache_key, rv, 60*60)\n",
" return rv\n",
"\n",
" @staticmethod\n",
" def _user_status():\n",
" \"\"\"Returns the commonly required status parameters of the user\n",
"\n",
" This method could be inherited and components could be added\n",
" \"\"\"\n",
" rv = {\n",
" 'messages': get_flashed_messages()\n",
" }\n",
" if request.is_guest_user:\n",
" rv.update({\n",
" 'logged_id': False\n",
" })\n",
" else:\n",
" rv.update({\n",
" 'logged_in': True,\n",
" 'name': request.nereid_user.display_name\n",
" })\n",
" return rv\n",
"\n",
" @classmethod\n",
" def user_status(cls):\n",
" \"\"\"\n",
" Returns a JSON of the user_status\n",
" \"\"\"\n",
" return jsonify(status=cls._user_status())\n",
"\n",
"\n",
"\n",
"class URLRule(ModelSQL, ModelView):\n",
" \"\"\"\n",
" URL Rule\n",
" ~~~~~~~~\n",
"\n",
" A rule that represents a single URL pattern\n",
"\n",
" :param path: Path of the URL\n",
" :param name: Name of the URL. This is used for reverse mapping, hence\n",
" needs to be unique\n",
" :param handler: The handler of this URL or the target model.method\n",
" which is called. The representation is::\n",
"\n",
" <model>.<method>\n",
"\n",
" For example: To call list_parties method in party.party use:\n",
"\n",
" party.party.list_parties\n",
"\n",
" The signature of the method being called should be:\n",
"\n",
" def method(self, **arguments):\n",
" return \"Hello World\"\n",
"\n",
" where request is the request object and arguments is the dictionary\n",
" of the values generated from the match of the URL\n",
"\n",
" :param active: Whether the website is active or not.\n",
"\n",
" Advanced\n",
" ~~~~~~~~~\n",
"\n",
" :param defaults: Defaults of the URL (O2M - URLRuleDefaults)\n",
"\n",
" :param method: POST, GET, \n",
" :param only_for_generation: URL will not be mapped, but can be used \n",
" for URL generation. Example for static pages, where content\n",
" delivery is managed by apache, but URL generation is necessary\n",
" :param redirect_to: (M2O self) Another URL to which the redirect has to\n",
" be done\n",
" :param sequence: Numeric sequence of the URL Map.\n",
" :param url_map: Relation field for url_rule o2m\n",
" \"\"\"\n",
" __name__ = \"nereid.url_rule\"\n",
" _rec_name = 'rule'\n",
"\n",
" rule = fields.Char('Rule', required=True, select=True,)\n",
" endpoint = fields.Char('Endpoint', select=True,)\n",
" active = fields.Boolean('Active')\n",
" defaults = fields.One2Many('nereid.url_rule_defaults', 'rule', 'Defaults')\n",
"\n",
" # Supported HTTP methods\n",
" http_method_get = fields.Boolean('GET')\n",
" http_method_post = fields.Boolean('POST')\n",
" http_method_patch = fields.Boolean('PATCH')\n",
" http_method_put = fields.Boolean('PUT')\n",
" http_method_delete = fields.Boolean('DELETE')\n",
"\n",
" only_for_genaration = fields.Boolean('Only for Generation')\n",
" redirect_to = fields.Char('Redirect To')\n",
" sequence = fields.Integer('Sequence', required=True,)\n",
" url_map = fields.Many2One('nereid.url_map', 'URL Map')\n",
"\n",
" @classmethod\n",
" def __setup__(cls):\n",
" super(URLRule, cls).__setup__()\n",
" cls._order.insert(0, ('sequence', 'ASC'))\n",
"\n",
" @staticmethod\n",
" def default_active():\n",
" return True\n",
"\n",
" @staticmethod\n",
" def default_http_method_get():\n",
" return True\n",
"\n",
" def get_http_methods(self):\n",
" \"\"\"\n",
" Returns an iterable of HTTP methods that the URL has to support.\n",
"\n",
" .. versionadded: 2.4.0.6\n",
" \"\"\"\n",
" methods = []\n",
" if self.http_method_get:\n",
" methods.append('GET')\n",
" if self.http_method_post:\n",
" methods.append('POST')\n",
" if self.http_method_put:\n",
" methods.append('PUT')\n",
" if self.http_method_delete:\n",
" methods.append('DELETE')\n",
" if self.http_method_patch:\n",
" methods.append('PATCH')\n",
" return methods\n",
"\n",
" def get_rule_arguments(self):\n",
" \"\"\"\n",
" Return the arguments of a Rule in the corresponding format\n",
" \"\"\"\n",
" defaults = dict(\n",
" [(i.key, i.value) for i in self.defaults]\n",
" )\n",
" return {\n",
" 'rule': self.rule,\n",
" 'endpoint': self.endpoint,\n",
" 'methods': self.get_http_methods(),\n",
" 'build_only': self.only_for_genaration,\n",
" 'defaults': defaults,\n",
" 'redirect_to': self.redirect_to or None,\n",
" }\n",
"\n",
"\n",
"class URLRuleDefaults(ModelSQL, ModelView):\n",
" \"\"\"\n",
" Defaults for the URL\n",
"\n",
" :param key: The char for the default's key\n",
" :param value: The Value for the default's Value\n",
" :param Rule: M2O Rule\n",
" \"\"\"\n",
" __name__ = \"nereid.url_rule_defaults\"\n",
" _rec_name = 'key'\n",
"\n",
" key = fields.Char('Key', required=True, select=True)\n",
" value = fields.Char('Value', required=True, select=True)\n",
" rule = fields.Many2One('nereid.url_rule', 'Rule', required=True, \n",
" select=True)\n",
"\n",
"\n",
"class WebsiteCountry(ModelSQL):\n",
" \"Website Country Relations\"\n",
" __name__ = 'nereid.website-country.country'\n",
"\n",
" website = fields.Many2One('nereid.website', 'Website')\n",
" country = fields.Many2One('country.country', 'Country')\n",
"\n",
"\n",
"class WebsiteCurrency(ModelSQL):\n",
" \"Currencies to be made available on website\"\n",
" __name__ = 'nereid.website-currency.currency'\n",
" _table = 'website_currency_rel'\n",
"\n",
" website = fields.Many2One(\n",
" 'nereid.website', 'Website',\n",
" ondelete='CASCADE', select=1, required=True)\n",
" currency = fields.Many2One(\n",
" 'currency.currency', 'Currency',\n",
" ondelete='CASCADE', select=1, required=True)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017543859649122806,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.022222222222222223,
0.017241379310344827,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0.02127659574468085,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0.0425531914893617,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012658227848101266,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0.0136986301369863,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014285714285714285,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 497 | 0.001274 |
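# Each record in this dump, like the one that ends just above, pairs the raw text of
# a source file with an array of its individual lines, a parallel array of per-line
# scores, a line count, and an average score. The two trailing numbers appear to be
# derived from the score array (the 144-line and 69-line records further down match
# this exactly); a minimal sketch of that relationship, with illustrative names only:
def summarize(scores):
    """Recompute a record's trailing columns (line count, average score)."""
    num_lines = len(scores)
    avg_score = sum(scores) / num_lines if num_lines else 0.0
    return num_lines, avg_score
# For the record above this should give (497, ~0.001274).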
# -*- coding: utf-8 -*-
from flask import render_template, Blueprint, redirect, url_for, flash, request, current_app
from flask.ext.login import login_required, current_user
from ..models import db, User, Story, Upvote
from ..forms import StoryForm, RepostStoryForm, UserForm, PasswordForm
bp = Blueprint('user', __name__)
@bp.route('/mine')
@login_required
def mine():
"""我的"""
return redirect(url_for('user.my_stories'))
@bp.route('/my_stories', defaults={'page': 1})
@bp.route('/my_stories/page/<int:page>')
@login_required
def my_stories(page):
my_stories = Story.query.filter_by(
user_id=current_user.id).order_by(Story.created.desc())
my_stories = my_stories.paginate(
page, current_app.config['FLASK_STORIES_PER_PAGE'], error_out=True)
my_stories__son_stories = []
for my_story in my_stories.items:
son_stories = Story.query.filter_by(
parent_story_id=my_story.id).order_by(Story.created.desc()).all()
my_stories__son_stories.append((my_story, son_stories))
return render_template('user/mine.html', stories=my_stories, page=page,
my_stories__son_stories=my_stories__son_stories, my=True)
@bp.route('/my_upvote_stories', defaults={'page': 1})
@bp.route('/my_upvote_stories/page/<int:page>')
@login_required
def my_upvote_stories(page):
    # Fetch the stories I have upvoted
# me = User.query.filter_by(id=current_user.id).first()
# my_upvote_stories = []
# for upvote in me.upvotes:
# my_upvote_stories.append(upvote.story)
my_upvote_stories = Story.query.join(
Upvote).filter_by(user_id=current_user.id)
my_upvote_stories = my_upvote_stories.paginate(
page, current_app.config['FLASK_STORIES_PER_PAGE'], error_out=True)
my_upvote_stories__son_stories = []
for my_upvote_story in my_upvote_stories.items:
son_stories = Story.query.filter_by(
parent_story_id=my_upvote_story.id).order_by(Story.created.desc()).all()
my_upvote_stories__son_stories.append((my_upvote_story, son_stories))
return render_template('user/mine.html', stories=my_upvote_stories, page=page, my_upvote_stories__son_stories=my_upvote_stories__son_stories, my_upvote=True)
@bp.route('/create_story', methods=['GET', 'POST'])
@login_required
def create_story():
form = StoryForm()
if form.validate_on_submit():
title = form.title.data.strip()
content = form.content.data.strip()
story = Story(user_id=current_user.id, title=title, content=content)
db.session.add(story)
db.session.commit()
return redirect(url_for('user.my_stories'))
return render_template('user/create_story.html', form=form)
@bp.route('/create_repost/<int:story_id>', methods=['GET', 'POST'])
@login_required
def create_repost(story_id):
form = RepostStoryForm()
stories__son_stories = []
    # The original story
story = Story.query.filter_by(id=story_id).first()
son_stories = Story.query.filter_by(
parent_story_id=story_id).order_by(Story.created.desc()).all()
    # Add the parent stories
if story.parent_stories_ids:
for parent_story_id in story.parent_stories_ids.split(','):
parent_story = Story.query.filter_by(id=parent_story_id).first()
stories__son_stories.append((parent_story, Story.query.filter_by(
parent_story_id=parent_story.id).order_by(Story.created.desc()).all()))
    # Add the story being continued
stories__son_stories.append((story, son_stories))
if form.validate_on_submit():
content = form.content.data.strip()
new_story = Story(user_id=current_user.id, title=story.title, content=content,
parent_story_id=story_id, parent_stories_ids=story.parent_stories_ids + "," + str(story_id) if story.parent_stories_ids else str(story_id))
db.session.add(new_story)
db.session.commit()
return redirect(url_for('user.my_stories'))
return render_template('user/create_repost.html', form=form, stories__son_stories=stories__son_stories)
def redirect_url(default='site.index'):
return request.args.get('next') or request.referrer or url_for(default)
@bp.route('/upvote/<int:story_id>')
@login_required
def upvote(story_id):
upvote = Upvote.query.filter(Upvote.user_id == current_user.id).filter(
Upvote.story_id == story_id).first()
if not upvote:
upvote = Upvote(user_id=current_user.id, story_id=story_id)
db.session.add(upvote)
db.session.commit()
flash('成功点赞')
return redirect(redirect_url())
flash('已赞过')
return redirect(redirect_url())
@bp.route('/setting/<int:user_id>', methods=['GET', 'POST'])
@login_required
def setting(user_id):
user = User.query.filter_by(id=user_id).first()
form = UserForm(obj=user)
if form.validate_on_submit():
user.email = form.email.data.strip()
user.username = form.username.data.strip()
db.session.add(user)
db.session.commit()
flash('个人信息修改成功')
        return redirect(url_for('user.setting', user_id=user.id))
return render_template('user/setting.html', form=form, user=user)
@bp.route('/change_password', methods=['GET', 'POST'])
@login_required
def change_password():
form = PasswordForm()
if form.validate_on_submit():
current_user.password = form.new.data.strip()
db.session.add(current_user)
db.session.commit()
flash('密码修改成功')
        return redirect(url_for('user.change_password'))
return render_template('user/change_password.html', form=form)
| [
"# -*- coding: utf-8 -*-\n",
"\n",
"from flask import render_template, Blueprint, redirect, url_for, flash, request, current_app\n",
"from flask.ext.login import login_required, current_user\n",
"\n",
"from ..models import db, User, Story, Upvote\n",
"from ..forms import StoryForm, RepostStoryForm, UserForm, PasswordForm\n",
"\n",
"bp = Blueprint('user', __name__)\n",
"\n",
"\n",
"@bp.route('/mine')\n",
"@login_required\n",
"def mine():\n",
" \"\"\"我的\"\"\"\n",
" return redirect(url_for('user.my_stories'))\n",
"\n",
"\n",
"@bp.route('/my_stories', defaults={'page': 1})\n",
"@bp.route('/my_stories/page/<int:page>')\n",
"@login_required\n",
"def my_stories(page):\n",
" my_stories = Story.query.filter_by(\n",
" user_id=current_user.id).order_by(Story.created.desc())\n",
" my_stories = my_stories.paginate(\n",
" page, current_app.config['FLASK_STORIES_PER_PAGE'], error_out=True)\n",
"\n",
" my_stories__son_stories = []\n",
" for my_story in my_stories.items:\n",
" son_stories = Story.query.filter_by(\n",
" parent_story_id=my_story.id).order_by(Story.created.desc()).all()\n",
" my_stories__son_stories.append((my_story, son_stories))\n",
" return render_template('user/mine.html', stories=my_stories, page=page,\n",
" my_stories__son_stories=my_stories__son_stories, my=True)\n",
"\n",
"\n",
"@bp.route('/my_upvote_stories', defaults={'page': 1})\n",
"@bp.route('/my_upvote_stories/page/<int:page>')\n",
"@login_required\n",
"def my_upvote_stories(page):\n",
" # 拿到我喜欢的故事\n",
" # me = User.query.filter_by(id=current_user.id).first()\n",
" # my_upvote_stories = []\n",
" # for upvote in me.upvotes:\n",
" # my_upvote_stories.append(upvote.story)\n",
" my_upvote_stories = Story.query.join(\n",
" Upvote).filter_by(user_id=current_user.id)\n",
" my_upvote_stories = my_upvote_stories.paginate(\n",
" page, current_app.config['FLASK_STORIES_PER_PAGE'], error_out=True)\n",
"\n",
" my_upvote_stories__son_stories = []\n",
" for my_upvote_story in my_upvote_stories.items:\n",
" son_stories = Story.query.filter_by(\n",
" parent_story_id=my_upvote_story.id).order_by(Story.created.desc()).all()\n",
" my_upvote_stories__son_stories.append((my_upvote_story, son_stories))\n",
" return render_template('user/mine.html', stories=my_upvote_stories, page=page, my_upvote_stories__son_stories=my_upvote_stories__son_stories, my_upvote=True)\n",
"\n",
"\n",
"@bp.route('/create_story', methods=['GET', 'POST'])\n",
"@login_required\n",
"def create_story():\n",
" form = StoryForm()\n",
" if form.validate_on_submit():\n",
" title = form.title.data.strip()\n",
" content = form.content.data.strip()\n",
" story = Story(user_id=current_user.id, title=title, content=content)\n",
" db.session.add(story)\n",
" db.session.commit()\n",
" return redirect(url_for('user.my_stories'))\n",
" return render_template('user/create_story.html', form=form)\n",
"\n",
"\n",
"@bp.route('/create_repost/<int:story_id>', methods=['GET', 'POST'])\n",
"@login_required\n",
"def create_repost(story_id):\n",
" form = RepostStoryForm()\n",
" stories__son_stories = []\n",
" # 原故事\n",
" story = Story.query.filter_by(id=story_id).first()\n",
" son_stories = Story.query.filter_by(\n",
" parent_story_id=story_id).order_by(Story.created.desc()).all()\n",
" # 添加父故事\n",
" if story.parent_stories_ids:\n",
" for parent_story_id in story.parent_stories_ids.split(','):\n",
" parent_story = Story.query.filter_by(id=parent_story_id).first()\n",
" stories__son_stories.append((parent_story, Story.query.filter_by(\n",
" parent_story_id=parent_story.id).order_by(Story.created.desc()).all()))\n",
" # 添加待续写的故事\n",
" stories__son_stories.append((story, son_stories))\n",
" if form.validate_on_submit():\n",
" content = form.content.data.strip()\n",
" new_story = Story(user_id=current_user.id, title=story.title, content=content,\n",
" parent_story_id=story_id, parent_stories_ids=story.parent_stories_ids + \",\" + str(story_id) if story.parent_stories_ids else str(story_id))\n",
" db.session.add(new_story)\n",
" db.session.commit()\n",
" return redirect(url_for('user.my_stories'))\n",
" return render_template('user/create_repost.html', form=form, stories__son_stories=stories__son_stories)\n",
"\n",
"\n",
"def redirect_url(default='site.index'):\n",
" return request.args.get('next') or request.referrer or url_for(default)\n",
"\n",
"\n",
"@bp.route('/upvote/<int:story_id>')\n",
"@login_required\n",
"def upvote(story_id):\n",
" upvote = Upvote.query.filter(Upvote.user_id == current_user.id).filter(\n",
" Upvote.story_id == story_id).first()\n",
" if not upvote:\n",
" upvote = Upvote(user_id=current_user.id, story_id=story_id)\n",
" db.session.add(upvote)\n",
" db.session.commit()\n",
" flash('成功点赞')\n",
" return redirect(redirect_url())\n",
" flash('已赞过')\n",
" return redirect(redirect_url())\n",
"\n",
"\n",
"@bp.route('/setting/<int:user_id>', methods=['GET', 'POST'])\n",
"@login_required\n",
"def setting(user_id):\n",
" user = User.query.filter_by(id=user_id).first()\n",
" form = UserForm(obj=user)\n",
" if form.validate_on_submit():\n",
" user.email = form.email.data.strip()\n",
" user.username = form.username.data.strip()\n",
" db.session.add(user)\n",
" db.session.commit()\n",
" flash('个人信息修改成功')\n",
" redirect(url_for('user.setting', user_id=user.id))\n",
" return render_template('user/setting.html', form=form, user=user)\n",
"\n",
"\n",
"@bp.route('/change_password', methods=['GET', 'POST'])\n",
"@login_required\n",
"def change_password():\n",
" form = PasswordForm()\n",
" if form.validate_on_submit():\n",
" current_user.password = form.new.data.strip()\n",
" db.session.add(current_user)\n",
" db.session.commit()\n",
" flash('密码修改成功')\n",
" redirect(url_for('user.change_password'))\n",
" return render_template('user/change_password.html', form=form)\n"
] | [
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0.006172839506172839,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0.011494252873563218,
0.006024096385542169,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 144 | 0.000546 |
import workflows
# With this import we can use this utils file as the original workflows.utils
from workflows.utils import *
def get_allowed_transitions(obj, user):
"""Returns all allowed transitions for passed object and user. Takes the
current state of the object into account.
**Parameters:**
obj
The object for which the transitions should be returned.
user
The user for which the transitions are allowed.
"""
from gf.gas.models import GASSupplierOrder, GASMemberOrder
from flexi_auth.models import ParamRole
from consts import GAS_MEMBER, GAS_REFERRER_SUPPLIER, GAS_REFERRER_TECH, DES_ADMIN
if isinstance(obj, GASSupplierOrder):
param_roles = [
ParamRole.get_role(GAS_REFERRER_SUPPLIER, pact=obj.pact),
ParamRole.get_role(GAS_REFERRER_TECH, gas=obj.gas),
ParamRole.get_role(DES_ADMIN, des=obj.des),
]
elif isinstance(obj, GASMemberOrder):
param_roles = [
ParamRole.get_role(GAS_MEMBER, obj.gas)
]
else:
return workflows.utils.get_allowed_transitions(obj, user)
for pr in param_roles:
if user in pr.get_users():
rv = workflows.utils.get_allowed_transitions(obj, user)
break
elif isinstance(obj, GASSupplierOrder) and user.person == obj.referrer_person:
#FIXME: ugly !
rv = workflows.utils.get_allowed_transitions(obj, user)
break
else:
rv = []
return rv
#-------------------------------------------------------------------------------------
# Just moved do_transition here to let it use the new get_allowed_transitions function
#-------------------------------------------------------------------------------------
def do_transition(obj, transition, user):
"""Processes the passed transition to the passed object (if allowed).
"""
if not isinstance(transition, Transition):
try:
transition = Transition.objects.get(name=transition)
except Transition.DoesNotExist:
return False
transitions = get_allowed_transitions(obj, user)
if transition in transitions:
set_state(obj, transition.destination)
return True
else:
return False
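# Usage sketch only, not part of the original module: `order` stands for any
# workflow-managed instance (e.g. a GASSupplierOrder) and 'Close order' for a
# Transition name configured elsewhere; both are hypothetical values.
def _example_close_order(order, user):
    if do_transition(order, 'Close order', user):
        return 'closed'
    return 'not allowed'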
| [
"\n",
"import workflows\n",
"\n",
"# With this import we can use this utils file as the original workflows.utils\n",
"from workflows.utils import *\n",
"\n",
"def get_allowed_transitions(obj, user):\n",
" \"\"\"Returns all allowed transitions for passed object and user. Takes the\n",
" current state of the object into account.\n",
"\n",
" **Parameters:**\n",
"\n",
" obj\n",
" The object for which the transitions should be returned.\n",
"\n",
" user\n",
" The user for which the transitions are allowed.\n",
" \"\"\"\n",
" from gf.gas.models import GASSupplierOrder, GASMemberOrder\n",
" from flexi_auth.models import ParamRole\n",
" from consts import GAS_MEMBER, GAS_REFERRER_SUPPLIER, GAS_REFERRER_TECH, DES_ADMIN\n",
" \n",
" if isinstance(obj, GASSupplierOrder):\n",
" param_roles = [\n",
" ParamRole.get_role(GAS_REFERRER_SUPPLIER, pact=obj.pact),\n",
" ParamRole.get_role(GAS_REFERRER_TECH, gas=obj.gas),\n",
" ParamRole.get_role(DES_ADMIN, des=obj.des),\n",
" ]\n",
" elif isinstance(obj, GASMemberOrder):\n",
" param_roles = [\n",
" ParamRole.get_role(GAS_MEMBER, obj.gas)\n",
" ]\n",
"\n",
" else:\n",
" return workflows.utils.get_allowed_transitions(obj, user)\n",
"\n",
" for pr in param_roles:\n",
" if user in pr.get_users():\n",
" rv = workflows.utils.get_allowed_transitions(obj, user)\n",
" break\n",
" elif isinstance(obj, GASSupplierOrder) and user.person == obj.referrer_person:\n",
" #FIXME: ugly !\n",
" rv = workflows.utils.get_allowed_transitions(obj, user)\n",
" break\n",
" else:\n",
" rv = []\n",
"\n",
" return rv \n",
"\n",
"#-------------------------------------------------------------------------------------\n",
"# Just moved do_transition here to let it use the new get_allowed_transitions function\n",
"#-------------------------------------------------------------------------------------\n",
"\n",
"def do_transition(obj, transition, user):\n",
" \"\"\"Processes the passed transition to the passed object (if allowed).\n",
" \"\"\"\n",
" if not isinstance(transition, Transition):\n",
" try:\n",
" transition = Transition.objects.get(name=transition)\n",
" except Transition.DoesNotExist:\n",
" return False\n",
"\n",
" transitions = get_allowed_transitions(obj, user)\n",
" if transition in transitions:\n",
" set_state(obj, transition.destination)\n",
" return True\n",
" else:\n",
" return False\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.1111111111111111,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.037037037037037035,
0,
0,
0,
0,
0,
0.06666666666666667,
0,
0.022988505747126436,
0.011494252873563218,
0.022988505747126436,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1
] | 69 | 0.020059 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.utils import with_metaclass
from abc import ABCMeta, abstractproperty
from itertools import product
import numpy as np
import re
from skbio.util._decorator import classproperty, overrides, stable
from skbio.util._misc import MiniRegistry
from ._sequence import Sequence
class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
"""Store biological sequence data conforming to the IUPAC character set.
This is an abstract base class (ABC) that cannot be instantiated.
Attributes
----------
values
metadata
positional_metadata
alphabet
gap_chars
nondegenerate_chars
degenerate_chars
degenerate_map
Raises
------
ValueError
If sequence characters are not in the IUPAC character set [1]_.
See Also
--------
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
"""
__validation_mask = None
__degenerate_codes = None
__nondegenerate_codes = None
__gap_codes = None
@classproperty
def _validation_mask(cls):
# TODO These masks could be defined (as literals) on each concrete
# object. For now, memoize!
if cls.__validation_mask is None:
cls.__validation_mask = np.invert(np.bincount(
np.fromstring(''.join(cls.alphabet), dtype=np.uint8),
minlength=cls._number_of_extended_ascii_codes).astype(bool))
return cls.__validation_mask
@classproperty
def _degenerate_codes(cls):
if cls.__degenerate_codes is None:
degens = cls.degenerate_chars
cls.__degenerate_codes = np.asarray([ord(d) for d in degens])
return cls.__degenerate_codes
@classproperty
def _nondegenerate_codes(cls):
if cls.__nondegenerate_codes is None:
nondegens = cls.nondegenerate_chars
cls.__nondegenerate_codes = np.asarray([ord(d) for d in nondegens])
return cls.__nondegenerate_codes
@classproperty
def _gap_codes(cls):
if cls.__gap_codes is None:
gaps = cls.gap_chars
cls.__gap_codes = np.asarray([ord(g) for g in gaps])
return cls.__gap_codes
@classproperty
@stable(as_of='0.4.0')
def alphabet(cls):
"""Return valid IUPAC characters.
This includes gap, non-degenerate, and degenerate characters.
Returns
-------
set
Valid IUPAC characters.
"""
return cls.degenerate_chars | cls.nondegenerate_chars | cls.gap_chars
@classproperty
@stable(as_of='0.4.0')
def gap_chars(cls):
"""Return characters defined as gaps.
Returns
-------
set
Characters defined as gaps.
"""
return set('-.')
@classproperty
@stable(as_of='0.4.0')
def degenerate_chars(cls):
"""Return degenerate IUPAC characters.
Returns
-------
set
Degenerate IUPAC characters.
"""
return set(cls.degenerate_map)
@abstractproperty
@classproperty
@stable(as_of='0.4.0')
def nondegenerate_chars(cls):
"""Return non-degenerate IUPAC characters.
Returns
-------
set
Non-degenerate IUPAC characters.
"""
return set() # pragma: no cover
@abstractproperty
@classproperty
@stable(as_of='0.4.0')
def degenerate_map(cls):
"""Return mapping of degenerate to non-degenerate characters.
Returns
-------
dict (set)
Mapping of each degenerate IUPAC character to the set of
non-degenerate IUPAC characters it represents.
"""
return set() # pragma: no cover
@property
def _motifs(self):
return _motifs
@overrides(Sequence)
def __init__(self, sequence, metadata=None, positional_metadata=None,
lowercase=False, validate=True):
super(IUPACSequence, self).__init__(
sequence, metadata, positional_metadata, lowercase)
if validate:
self._validate()
def _validate(self):
# This is the fastest way that we have found to identify the
# presence or absence of certain characters (numbers).
# It works by multiplying a mask where the numbers which are
# permitted have a zero at their index, and all others have a one.
        # The result is a vector which will propagate counts of invalid
# numbers and remove counts of valid numbers, so that we need only
# see if the array is empty to determine validity.
invalid_characters = np.bincount(
self._bytes, minlength=self._number_of_extended_ascii_codes
) * self._validation_mask
if np.any(invalid_characters):
bad = list(np.where(
invalid_characters > 0)[0].astype(np.uint8).view('|S1'))
raise ValueError(
"Invalid character%s in sequence: %r. \n"
"Lowercase letters are not used in IUPAC notation. You can "
"pass `lowercase=True` if your sequence contains lowercase "
"letters.\n"
"Valid IUPAC characters: "
"%r" % ('s' if len(bad) > 1 else '',
[str(b.tostring().decode("ascii")) for b in bad] if
len(bad) > 1 else bad[0],
list(self.alphabet)))
@stable(as_of='0.4.0')
def gaps(self):
"""Find positions containing gaps in the biological sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a gap character is present
at that position in the biological sequence.
See Also
--------
has_gaps
Examples
--------
>>> from skbio import DNA
>>> s = DNA('AC-G-')
>>> s.gaps()
array([False, False, True, False, True], dtype=bool)
"""
return np.in1d(self._bytes, self._gap_codes)
@stable(as_of='0.4.0')
def has_gaps(self):
"""Determine if the sequence contains one or more gap characters.
Returns
-------
bool
Indicates whether there are one or more occurrences of gap
characters in the biological sequence.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_gaps()
False
>>> t = DNA('A.CAC--GACGTT')
>>> t.has_gaps()
True
"""
# TODO use count, there aren't that many gap chars
# TODO: cache results
return bool(self.gaps().any())
@stable(as_of='0.4.0')
def degenerates(self):
"""Find positions containing degenerate characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a degenerate character is
present at that position in the biological sequence.
See Also
--------
has_degenerates
nondegenerates
has_nondegenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.degenerates()
array([False, False, True, False, True], dtype=bool)
"""
return np.in1d(self._bytes, self._degenerate_codes)
@stable(as_of='0.4.0')
def has_degenerates(self):
"""Determine if sequence contains one or more degenerate characters.
Returns
-------
bool
Indicates whether there are one or more occurrences of degenerate
characters in the biological sequence.
See Also
--------
degenerates
nondegenerates
has_nondegenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACAC-GACGTT')
>>> s.has_degenerates()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_degenerates()
True
"""
# TODO use bincount!
# TODO: cache results
return bool(self.degenerates().any())
@stable(as_of='0.4.0')
def nondegenerates(self):
"""Find positions containing non-degenerate characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a non-degenerate character
is present at that position in the biological sequence.
See Also
--------
has_nondegenerates
degenerates
        has_degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.nondegenerates()
array([ True, True, False, True, False], dtype=bool)
"""
return np.in1d(self._bytes, self._nondegenerate_codes)
@stable(as_of='0.4.0')
def has_nondegenerates(self):
"""Determine if sequence contains one or more non-degenerate characters
Returns
-------
bool
Indicates whether there are one or more occurrences of
non-degenerate characters in the biological sequence.
See Also
--------
nondegenerates
degenerates
has_degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('NWNNNNNN')
>>> s.has_nondegenerates()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_nondegenerates()
True
"""
# TODO: cache results
return bool(self.nondegenerates().any())
@stable(as_of='0.4.0')
def degap(self):
"""Return a new sequence with gap characters removed.
Returns
-------
IUPACSequence
A new sequence with all gap characters removed.
See Also
--------
gap_chars
Notes
-----
The type and metadata of the result will be the same as the
biological sequence. If positional metadata is present, it will be
filtered in the same manner as the sequence characters and included in
the resulting degapped sequence.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('GGTC-C--ATT-C.',
... positional_metadata={'quality':range(14)})
>>> s.degap()
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 9
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 55.56%
-----------------------------
0 GGTCCATTC
"""
return self[np.invert(self.gaps())]
@stable(as_of='0.4.0')
def expand_degenerates(self):
"""Yield all possible non-degenerate versions of the sequence.
Yields
------
IUPACSequence
Non-degenerate version of the sequence.
See Also
--------
degenerate_map
Notes
-----
There is no guaranteed ordering to the non-degenerate sequences that
are yielded.
Each non-degenerate sequence will have the same type, metadata,
and positional metadata as the biological sequence.
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TRG')
>>> seq_generator = seq.expand_degenerates()
>>> for s in sorted(seq_generator, key=str):
... s
... print('')
DNA
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 33.33%
-----------------------------
0 TAG
<BLANKLINE>
DNA
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 66.67%
-----------------------------
0 TGG
<BLANKLINE>
"""
degen_chars = self.degenerate_map
nonexpansion_chars = self.nondegenerate_chars.union(self.gap_chars)
expansions = []
for char in self:
char = str(char)
if char in nonexpansion_chars:
expansions.append(char)
else:
expansions.append(degen_chars[char])
result = product(*expansions)
return (self._to(sequence=''.join(nondegen_seq)) for nondegen_seq in
result)
@stable(as_of='0.4.0-dev')
def to_regex(self):
"""Return regular expression object that accounts for degenerate chars.
Returns
-------
regex
Pre-compiled regular expression object (as from ``re.compile``)
that matches all non-degenerate versions of this sequence, and
nothing else.
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TRG')
>>> regex = seq.to_regex()
>>> regex.match('TAG').string
'TAG'
>>> regex.match('TGG').string
'TGG'
>>> regex.match('TCG') is None
True
"""
regex_string = []
for base in str(self):
if base in self.degenerate_chars:
regex_string.append('[{0}]'.format(
''.join(self.degenerate_map[base])))
else:
regex_string.append(base)
return re.compile(''.join(regex_string))
@stable(as_of='0.4.0')
def find_motifs(self, motif_type, min_length=1, ignore=None):
"""Search the biological sequence for motifs.
Options for `motif_type`:
Parameters
----------
motif_type : str
Type of motif to find.
min_length : int, optional
Only motifs at least as long as `min_length` will be returned.
ignore : 1D array_like (bool), optional
Boolean vector indicating positions to ignore when matching.
Yields
------
slice
Location of the motif in the biological sequence.
Raises
------
ValueError
If an unknown `motif_type` is specified.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACGGGGAGGCGGAG')
>>> for motif_slice in s.find_motifs('purine-run', min_length=2):
... motif_slice
... str(s[motif_slice])
slice(2, 9, None)
'GGGGAGG'
slice(10, 14, None)
'GGAG'
Gap characters can disrupt motifs:
>>> s = DNA('GG-GG')
>>> for motif_slice in s.find_motifs('purine-run'):
... motif_slice
slice(0, 2, None)
slice(3, 5, None)
Gaps can be ignored by passing the gap boolean vector to `ignore`:
>>> s = DNA('GG-GG')
>>> for motif_slice in s.find_motifs('purine-run', ignore=s.gaps()):
... motif_slice
slice(0, 5, None)
"""
if motif_type not in self._motifs:
raise ValueError("Not a known motif (%r) for this sequence (%s)." %
(motif_type, self.__class__.__name__))
return self._motifs[motif_type](self, min_length, ignore)
@overrides(Sequence)
def _constructor(self, **kwargs):
return self.__class__(validate=False, lowercase=False, **kwargs)
@overrides(Sequence)
def _repr_stats(self):
"""Define custom statistics to display in the sequence's repr."""
stats = super(IUPACSequence, self)._repr_stats()
stats.append(('has gaps', '%r' % self.has_gaps()))
stats.append(('has degenerates', '%r' % self.has_degenerates()))
stats.append(('has non-degenerates', '%r' % self.has_nondegenerates()))
return stats
_motifs = MiniRegistry()
# Leave this at the bottom
_motifs.interpolate(IUPACSequence, "find_motifs")
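# Standalone illustration (not part of scikit-bio) of the byte-mask trick that
# IUPACSequence._validate() relies on above: invalid characters survive the
# multiplication by the mask, valid ones are zeroed out. The 4-letter alphabet
# and the probe sequence are made-up inputs.
if __name__ == '__main__':
    import numpy as np
    alphabet = np.frombuffer(b'ACGT', dtype=np.uint8)
    mask = np.invert(np.bincount(alphabet, minlength=256).astype(bool))
    probe = np.frombuffer(b'ACXG', dtype=np.uint8)
    bad = np.bincount(probe, minlength=256) * mask
    assert bad.sum() == 1          # exactly one invalid character: 'X'
    assert chr(int(np.where(bad > 0)[0][0])) == 'X'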
| [
"# ----------------------------------------------------------------------------\n",
"# Copyright (c) 2013--, scikit-bio development team.\n",
"#\n",
"# Distributed under the terms of the Modified BSD License.\n",
"#\n",
"# The full license is in the file COPYING.txt, distributed with this software.\n",
"# ----------------------------------------------------------------------------\n",
"\n",
"from __future__ import absolute_import, division, print_function\n",
"from future.utils import with_metaclass\n",
"\n",
"from abc import ABCMeta, abstractproperty\n",
"from itertools import product\n",
"\n",
"import numpy as np\n",
"\n",
"import re\n",
"\n",
"from skbio.util._decorator import classproperty, overrides, stable\n",
"from skbio.util._misc import MiniRegistry\n",
"from ._sequence import Sequence\n",
"\n",
"\n",
"class IUPACSequence(with_metaclass(ABCMeta, Sequence)):\n",
" \"\"\"Store biological sequence data conforming to the IUPAC character set.\n",
"\n",
" This is an abstract base class (ABC) that cannot be instantiated.\n",
"\n",
" Attributes\n",
" ----------\n",
" values\n",
" metadata\n",
" positional_metadata\n",
" alphabet\n",
" gap_chars\n",
" nondegenerate_chars\n",
" degenerate_chars\n",
" degenerate_map\n",
"\n",
" Raises\n",
" ------\n",
" ValueError\n",
" If sequence characters are not in the IUPAC character set [1]_.\n",
"\n",
" See Also\n",
" --------\n",
" DNA\n",
" RNA\n",
" Protein\n",
"\n",
" References\n",
" ----------\n",
" .. [1] Nomenclature for incompletely specified bases in nucleic acid\n",
" sequences: recommendations 1984.\n",
" Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.\n",
" A Cornish-Bowden\n",
"\n",
" \"\"\"\n",
" __validation_mask = None\n",
" __degenerate_codes = None\n",
" __nondegenerate_codes = None\n",
" __gap_codes = None\n",
"\n",
" @classproperty\n",
" def _validation_mask(cls):\n",
" # TODO These masks could be defined (as literals) on each concrete\n",
" # object. For now, memoize!\n",
" if cls.__validation_mask is None:\n",
" cls.__validation_mask = np.invert(np.bincount(\n",
" np.fromstring(''.join(cls.alphabet), dtype=np.uint8),\n",
" minlength=cls._number_of_extended_ascii_codes).astype(bool))\n",
" return cls.__validation_mask\n",
"\n",
" @classproperty\n",
" def _degenerate_codes(cls):\n",
" if cls.__degenerate_codes is None:\n",
" degens = cls.degenerate_chars\n",
" cls.__degenerate_codes = np.asarray([ord(d) for d in degens])\n",
" return cls.__degenerate_codes\n",
"\n",
" @classproperty\n",
" def _nondegenerate_codes(cls):\n",
" if cls.__nondegenerate_codes is None:\n",
" nondegens = cls.nondegenerate_chars\n",
" cls.__nondegenerate_codes = np.asarray([ord(d) for d in nondegens])\n",
" return cls.__nondegenerate_codes\n",
"\n",
" @classproperty\n",
" def _gap_codes(cls):\n",
" if cls.__gap_codes is None:\n",
" gaps = cls.gap_chars\n",
" cls.__gap_codes = np.asarray([ord(g) for g in gaps])\n",
" return cls.__gap_codes\n",
"\n",
" @classproperty\n",
" @stable(as_of='0.4.0')\n",
" def alphabet(cls):\n",
" \"\"\"Return valid IUPAC characters.\n",
"\n",
" This includes gap, non-degenerate, and degenerate characters.\n",
"\n",
" Returns\n",
" -------\n",
" set\n",
" Valid IUPAC characters.\n",
"\n",
" \"\"\"\n",
" return cls.degenerate_chars | cls.nondegenerate_chars | cls.gap_chars\n",
"\n",
" @classproperty\n",
" @stable(as_of='0.4.0')\n",
" def gap_chars(cls):\n",
" \"\"\"Return characters defined as gaps.\n",
"\n",
" Returns\n",
" -------\n",
" set\n",
" Characters defined as gaps.\n",
"\n",
" \"\"\"\n",
" return set('-.')\n",
"\n",
" @classproperty\n",
" @stable(as_of='0.4.0')\n",
" def degenerate_chars(cls):\n",
" \"\"\"Return degenerate IUPAC characters.\n",
"\n",
" Returns\n",
" -------\n",
" set\n",
" Degenerate IUPAC characters.\n",
"\n",
" \"\"\"\n",
" return set(cls.degenerate_map)\n",
"\n",
" @abstractproperty\n",
" @classproperty\n",
" @stable(as_of='0.4.0')\n",
" def nondegenerate_chars(cls):\n",
" \"\"\"Return non-degenerate IUPAC characters.\n",
"\n",
" Returns\n",
" -------\n",
" set\n",
" Non-degenerate IUPAC characters.\n",
"\n",
" \"\"\"\n",
" return set() # pragma: no cover\n",
"\n",
" @abstractproperty\n",
" @classproperty\n",
" @stable(as_of='0.4.0')\n",
" def degenerate_map(cls):\n",
" \"\"\"Return mapping of degenerate to non-degenerate characters.\n",
"\n",
" Returns\n",
" -------\n",
" dict (set)\n",
" Mapping of each degenerate IUPAC character to the set of\n",
" non-degenerate IUPAC characters it represents.\n",
"\n",
" \"\"\"\n",
" return set() # pragma: no cover\n",
"\n",
" @property\n",
" def _motifs(self):\n",
" return _motifs\n",
"\n",
" @overrides(Sequence)\n",
" def __init__(self, sequence, metadata=None, positional_metadata=None,\n",
" lowercase=False, validate=True):\n",
" super(IUPACSequence, self).__init__(\n",
" sequence, metadata, positional_metadata, lowercase)\n",
"\n",
" if validate:\n",
" self._validate()\n",
"\n",
" def _validate(self):\n",
" # This is the fastest way that we have found to identify the\n",
" # presence or absence of certain characters (numbers).\n",
" # It works by multiplying a mask where the numbers which are\n",
" # permitted have a zero at their index, and all others have a one.\n",
" # The result is a vector which will propogate counts of invalid\n",
" # numbers and remove counts of valid numbers, so that we need only\n",
" # see if the array is empty to determine validity.\n",
" invalid_characters = np.bincount(\n",
" self._bytes, minlength=self._number_of_extended_ascii_codes\n",
" ) * self._validation_mask\n",
" if np.any(invalid_characters):\n",
" bad = list(np.where(\n",
" invalid_characters > 0)[0].astype(np.uint8).view('|S1'))\n",
" raise ValueError(\n",
" \"Invalid character%s in sequence: %r. \\n\"\n",
" \"Lowercase letters are not used in IUPAC notation. You can \"\n",
" \"pass `lowercase=True` if your sequence contains lowercase \"\n",
" \"letters.\\n\"\n",
" \"Valid IUPAC characters: \"\n",
" \"%r\" % ('s' if len(bad) > 1 else '',\n",
" [str(b.tostring().decode(\"ascii\")) for b in bad] if\n",
" len(bad) > 1 else bad[0],\n",
" list(self.alphabet)))\n",
"\n",
" @stable(as_of='0.4.0')\n",
" def gaps(self):\n",
" \"\"\"Find positions containing gaps in the biological sequence.\n",
"\n",
" Returns\n",
" -------\n",
" 1D np.ndarray (bool)\n",
" Boolean vector where ``True`` indicates a gap character is present\n",
" at that position in the biological sequence.\n",
"\n",
" See Also\n",
" --------\n",
" has_gaps\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> s = DNA('AC-G-')\n",
" >>> s.gaps()\n",
" array([False, False, True, False, True], dtype=bool)\n",
"\n",
" \"\"\"\n",
" return np.in1d(self._bytes, self._gap_codes)\n",
"\n",
" @stable(as_of='0.4.0')\n",
" def has_gaps(self):\n",
" \"\"\"Determine if the sequence contains one or more gap characters.\n",
"\n",
" Returns\n",
" -------\n",
" bool\n",
" Indicates whether there are one or more occurrences of gap\n",
" characters in the biological sequence.\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> s = DNA('ACACGACGTT')\n",
" >>> s.has_gaps()\n",
" False\n",
" >>> t = DNA('A.CAC--GACGTT')\n",
" >>> t.has_gaps()\n",
" True\n",
"\n",
" \"\"\"\n",
" # TODO use count, there aren't that many gap chars\n",
" # TODO: cache results\n",
" return bool(self.gaps().any())\n",
"\n",
" @stable(as_of='0.4.0')\n",
" def degenerates(self):\n",
" \"\"\"Find positions containing degenerate characters in the sequence.\n",
"\n",
" Returns\n",
" -------\n",
" 1D np.ndarray (bool)\n",
" Boolean vector where ``True`` indicates a degenerate character is\n",
" present at that position in the biological sequence.\n",
"\n",
" See Also\n",
" --------\n",
" has_degenerates\n",
" nondegenerates\n",
" has_nondegenerates\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> s = DNA('ACWGN')\n",
" >>> s.degenerates()\n",
" array([False, False, True, False, True], dtype=bool)\n",
"\n",
" \"\"\"\n",
" return np.in1d(self._bytes, self._degenerate_codes)\n",
"\n",
" @stable(as_of='0.4.0')\n",
" def has_degenerates(self):\n",
" \"\"\"Determine if sequence contains one or more degenerate characters.\n",
"\n",
" Returns\n",
" -------\n",
" bool\n",
" Indicates whether there are one or more occurrences of degenerate\n",
" characters in the biological sequence.\n",
"\n",
" See Also\n",
" --------\n",
" degenerates\n",
" nondegenerates\n",
" has_nondegenerates\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> s = DNA('ACAC-GACGTT')\n",
" >>> s.has_degenerates()\n",
" False\n",
" >>> t = DNA('ANCACWWGACGTT')\n",
" >>> t.has_degenerates()\n",
" True\n",
"\n",
" \"\"\"\n",
" # TODO use bincount!\n",
" # TODO: cache results\n",
" return bool(self.degenerates().any())\n",
"\n",
" @stable(as_of='0.4.0')\n",
" def nondegenerates(self):\n",
" \"\"\"Find positions containing non-degenerate characters in the sequence.\n",
"\n",
" Returns\n",
" -------\n",
" 1D np.ndarray (bool)\n",
" Boolean vector where ``True`` indicates a non-degenerate character\n",
" is present at that position in the biological sequence.\n",
"\n",
" See Also\n",
" --------\n",
" has_nondegenerates\n",
" degenerates\n",
" has_nondegenerates\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> s = DNA('ACWGN')\n",
" >>> s.nondegenerates()\n",
" array([ True, True, False, True, False], dtype=bool)\n",
"\n",
" \"\"\"\n",
" return np.in1d(self._bytes, self._nondegenerate_codes)\n",
"\n",
" @stable(as_of='0.4.0')\n",
" def has_nondegenerates(self):\n",
" \"\"\"Determine if sequence contains one or more non-degenerate characters\n",
"\n",
" Returns\n",
" -------\n",
" bool\n",
" Indicates whether there are one or more occurrences of\n",
" non-degenerate characters in the biological sequence.\n",
"\n",
" See Also\n",
" --------\n",
" nondegenerates\n",
" degenerates\n",
" has_degenerates\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> s = DNA('NWNNNNNN')\n",
" >>> s.has_nondegenerates()\n",
" False\n",
" >>> t = DNA('ANCACWWGACGTT')\n",
" >>> t.has_nondegenerates()\n",
" True\n",
"\n",
" \"\"\"\n",
" # TODO: cache results\n",
" return bool(self.nondegenerates().any())\n",
"\n",
" @stable(as_of='0.4.0')\n",
" def degap(self):\n",
" \"\"\"Return a new sequence with gap characters removed.\n",
"\n",
" Returns\n",
" -------\n",
" IUPACSequence\n",
" A new sequence with all gap characters removed.\n",
"\n",
" See Also\n",
" --------\n",
" gap_chars\n",
"\n",
" Notes\n",
" -----\n",
" The type and metadata of the result will be the same as the\n",
" biological sequence. If positional metadata is present, it will be\n",
" filtered in the same manner as the sequence characters and included in\n",
" the resulting degapped sequence.\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> s = DNA('GGTC-C--ATT-C.',\n",
" ... positional_metadata={'quality':range(14)})\n",
" >>> s.degap()\n",
" DNA\n",
" -----------------------------\n",
" Positional metadata:\n",
" 'quality': <dtype: int64>\n",
" Stats:\n",
" length: 9\n",
" has gaps: False\n",
" has degenerates: False\n",
" has non-degenerates: True\n",
" GC-content: 55.56%\n",
" -----------------------------\n",
" 0 GGTCCATTC\n",
"\n",
" \"\"\"\n",
" return self[np.invert(self.gaps())]\n",
"\n",
" @stable(as_of='0.4.0')\n",
" def expand_degenerates(self):\n",
" \"\"\"Yield all possible non-degenerate versions of the sequence.\n",
"\n",
" Yields\n",
" ------\n",
" IUPACSequence\n",
" Non-degenerate version of the sequence.\n",
"\n",
" See Also\n",
" --------\n",
" degenerate_map\n",
"\n",
" Notes\n",
" -----\n",
" There is no guaranteed ordering to the non-degenerate sequences that\n",
" are yielded.\n",
"\n",
" Each non-degenerate sequence will have the same type, metadata,\n",
" and positional metadata as the biological sequence.\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> seq = DNA('TRG')\n",
" >>> seq_generator = seq.expand_degenerates()\n",
" >>> for s in sorted(seq_generator, key=str):\n",
" ... s\n",
" ... print('')\n",
" DNA\n",
" -----------------------------\n",
" Stats:\n",
" length: 3\n",
" has gaps: False\n",
" has degenerates: False\n",
" has non-degenerates: True\n",
" GC-content: 33.33%\n",
" -----------------------------\n",
" 0 TAG\n",
" <BLANKLINE>\n",
" DNA\n",
" -----------------------------\n",
" Stats:\n",
" length: 3\n",
" has gaps: False\n",
" has degenerates: False\n",
" has non-degenerates: True\n",
" GC-content: 66.67%\n",
" -----------------------------\n",
" 0 TGG\n",
" <BLANKLINE>\n",
"\n",
" \"\"\"\n",
" degen_chars = self.degenerate_map\n",
" nonexpansion_chars = self.nondegenerate_chars.union(self.gap_chars)\n",
"\n",
" expansions = []\n",
" for char in self:\n",
" char = str(char)\n",
" if char in nonexpansion_chars:\n",
" expansions.append(char)\n",
" else:\n",
" expansions.append(degen_chars[char])\n",
"\n",
" result = product(*expansions)\n",
" return (self._to(sequence=''.join(nondegen_seq)) for nondegen_seq in\n",
" result)\n",
"\n",
" @stable(as_of='0.4.0-dev')\n",
" def to_regex(self):\n",
" \"\"\"Return regular expression object that accounts for degenerate chars.\n",
"\n",
" Returns\n",
" -------\n",
" regex\n",
" Pre-compiled regular expression object (as from ``re.compile``)\n",
" that matches all non-degenerate versions of this sequence, and\n",
" nothing else.\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> seq = DNA('TRG')\n",
" >>> regex = seq.to_regex()\n",
" >>> regex.match('TAG').string\n",
" 'TAG'\n",
" >>> regex.match('TGG').string\n",
" 'TGG'\n",
" >>> regex.match('TCG') is None\n",
" True\n",
"\n",
" \"\"\"\n",
" regex_string = []\n",
" for base in str(self):\n",
" if base in self.degenerate_chars:\n",
" regex_string.append('[{0}]'.format(\n",
" ''.join(self.degenerate_map[base])))\n",
" else:\n",
" regex_string.append(base)\n",
" return re.compile(''.join(regex_string))\n",
"\n",
" @stable(as_of='0.4.0')\n",
" def find_motifs(self, motif_type, min_length=1, ignore=None):\n",
" \"\"\"Search the biological sequence for motifs.\n",
"\n",
" Options for `motif_type`:\n",
"\n",
" Parameters\n",
" ----------\n",
" motif_type : str\n",
" Type of motif to find.\n",
" min_length : int, optional\n",
" Only motifs at least as long as `min_length` will be returned.\n",
" ignore : 1D array_like (bool), optional\n",
" Boolean vector indicating positions to ignore when matching.\n",
"\n",
" Yields\n",
" ------\n",
" slice\n",
" Location of the motif in the biological sequence.\n",
"\n",
" Raises\n",
" ------\n",
" ValueError\n",
" If an unknown `motif_type` is specified.\n",
"\n",
" Examples\n",
" --------\n",
" >>> from skbio import DNA\n",
" >>> s = DNA('ACGGGGAGGCGGAG')\n",
" >>> for motif_slice in s.find_motifs('purine-run', min_length=2):\n",
" ... motif_slice\n",
" ... str(s[motif_slice])\n",
" slice(2, 9, None)\n",
" 'GGGGAGG'\n",
" slice(10, 14, None)\n",
" 'GGAG'\n",
"\n",
" Gap characters can disrupt motifs:\n",
"\n",
" >>> s = DNA('GG-GG')\n",
" >>> for motif_slice in s.find_motifs('purine-run'):\n",
" ... motif_slice\n",
" slice(0, 2, None)\n",
" slice(3, 5, None)\n",
"\n",
" Gaps can be ignored by passing the gap boolean vector to `ignore`:\n",
"\n",
" >>> s = DNA('GG-GG')\n",
" >>> for motif_slice in s.find_motifs('purine-run', ignore=s.gaps()):\n",
" ... motif_slice\n",
" slice(0, 5, None)\n",
"\n",
" \"\"\"\n",
" if motif_type not in self._motifs:\n",
" raise ValueError(\"Not a known motif (%r) for this sequence (%s).\" %\n",
" (motif_type, self.__class__.__name__))\n",
"\n",
" return self._motifs[motif_type](self, min_length, ignore)\n",
"\n",
" @overrides(Sequence)\n",
" def _constructor(self, **kwargs):\n",
" return self.__class__(validate=False, lowercase=False, **kwargs)\n",
"\n",
" @overrides(Sequence)\n",
" def _repr_stats(self):\n",
" \"\"\"Define custom statistics to display in the sequence's repr.\"\"\"\n",
" stats = super(IUPACSequence, self)._repr_stats()\n",
" stats.append(('has gaps', '%r' % self.has_gaps()))\n",
" stats.append(('has degenerates', '%r' % self.has_degenerates()))\n",
" stats.append(('has non-degenerates', '%r' % self.has_nondegenerates()))\n",
" return stats\n",
"\n",
"\n",
"_motifs = MiniRegistry()\n",
"\n",
"# Leave this at the bottom\n",
"_motifs.interpolate(IUPACSequence, \"find_motifs\")\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 584 | 0.000063 |
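The trailing pair of numbers on the row above appears to be the file's line count and the mean of its per-line scores: the score list holds 584 entries, exactly one of which is non-zero (0.037037...), and 0.037037 / 584 ≈ 0.000063. A minimal sketch of that cross-check, assuming the row's score column has been parsed into a Python list (the variable names below are illustrative, not part of the row itself):

# Recompute the row's average from its per-line scores.
# Assumption: `scores` mirrors the 584-element list shown above, which has a
# single non-zero entry; its position does not affect the mean.
scores = [0.0] * 583 + [0.037037037037037035]

avg_score = sum(scores) / len(scores)
print(f"{avg_score:.6f}")  # prints 0.000063, matching the row's final value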
# -*- coding: utf-8 -*-
"""
pygments.sphinxext
~~~~~~~~~~~~~~~~~~
Sphinx extension to generate automatic documentation of lexers,
formatters and filters.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import sys
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
MODULEDOC = '''
.. module:: %s
%s
%s
'''
LEXERDOC = '''
.. class:: %s
:Short names: %s
:Filenames: %s
:MIME types: %s
%s
'''
FMTERDOC = '''
.. class:: %s
:Short names: %s
:Filenames: %s
%s
'''
FILTERDOC = '''
.. class:: %s
:Name: %s
%s
'''
class PygmentsDoc(Directive):
"""
A directive to collect all lexers/formatters/filters and generate
autoclass directives for them.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
self.filenames = set()
if self.arguments[0] == 'lexers':
out = self.document_lexers()
elif self.arguments[0] == 'formatters':
out = self.document_formatters()
elif self.arguments[0] == 'filters':
out = self.document_filters()
else:
raise Exception('invalid argument for "pygmentsdoc" directive')
node = nodes.compound()
vl = ViewList(out.split('\n'), source='')
nested_parse_with_titles(self.state, vl, node)
for fn in self.filenames:
self.state.document.settings.record_dependencies.add(fn)
return node.children
def document_lexers(self):
from pygments.lexers._mapping import LEXERS
out = []
modules = {}
moduledocstrings = {}
for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
module = data[0]
mod = __import__(module, None, None, [classname])
self.filenames.add(mod.__file__)
cls = getattr(mod, classname)
if not cls.__doc__:
print("Warning: %s does not have a docstring." % classname)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
modules.setdefault(module, []).append((
classname,
', '.join(data[2]) or 'None',
', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None',
', '.join(data[4]) or 'None',
docstring))
if module not in moduledocstrings:
moddoc = mod.__doc__
if isinstance(moddoc, bytes):
moddoc = moddoc.decode('utf8')
moduledocstrings[module] = moddoc
for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
out.append(MODULEDOC % (module, heading, '-'*len(heading)))
for data in lexers:
out.append(LEXERDOC % data)
return ''.join(out)
def document_formatters(self):
from pygments.formatters import FORMATTERS
out = []
for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
module = data[0]
mod = __import__(module, None, None, [classname])
self.filenames.add(mod.__file__)
cls = getattr(mod, classname)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
heading = cls.__name__
out.append(FMTERDOC % (heading, ', '.join(data[1]) or 'None',
', '.join(data[2]).replace('*', '\\*') or 'None',
docstring))
return ''.join(out)
def document_filters(self):
from pygments.filters import FILTERS
out = []
for name, cls in FILTERS.items():
self.filenames.add(sys.modules[cls.__module__].__file__)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
out.append(FILTERDOC % (cls.__name__, name, docstring))
return ''.join(out)
def setup(app):
app.add_directive('pygmentsdoc', PygmentsDoc)
| [
"# -*- coding: utf-8 -*-\n",
"\"\"\"\n",
" pygments.sphinxext\n",
" ~~~~~~~~~~~~~~~~~~\n",
"\n",
" Sphinx extension to generate automatic documentation of lexers,\n",
" formatters and filters.\n",
"\n",
" :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.\n",
" :license: BSD, see LICENSE for details.\n",
"\"\"\"\n",
"\n",
"from __future__ import print_function\n",
"\n",
"import sys\n",
"\n",
"from docutils import nodes\n",
"from docutils.statemachine import ViewList\n",
"from sphinx.util.compat import Directive\n",
"from sphinx.util.nodes import nested_parse_with_titles\n",
"\n",
"\n",
"MODULEDOC = '''\n",
".. module:: %s\n",
"\n",
"%s\n",
"%s\n",
"'''\n",
"\n",
"LEXERDOC = '''\n",
".. class:: %s\n",
"\n",
" :Short names: %s\n",
" :Filenames: %s\n",
" :MIME types: %s\n",
"\n",
" %s\n",
"\n",
"'''\n",
"\n",
"FMTERDOC = '''\n",
".. class:: %s\n",
"\n",
" :Short names: %s\n",
" :Filenames: %s\n",
"\n",
" %s\n",
"\n",
"'''\n",
"\n",
"FILTERDOC = '''\n",
".. class:: %s\n",
"\n",
" :Name: %s\n",
"\n",
" %s\n",
"\n",
"'''\n",
"\n",
"class PygmentsDoc(Directive):\n",
" \"\"\"\n",
" A directive to collect all lexers/formatters/filters and generate\n",
" autoclass directives for them.\n",
" \"\"\"\n",
" has_content = False\n",
" required_arguments = 1\n",
" optional_arguments = 0\n",
" final_argument_whitespace = False\n",
" option_spec = {}\n",
"\n",
" def run(self):\n",
" self.filenames = set()\n",
" if self.arguments[0] == 'lexers':\n",
" out = self.document_lexers()\n",
" elif self.arguments[0] == 'formatters':\n",
" out = self.document_formatters()\n",
" elif self.arguments[0] == 'filters':\n",
" out = self.document_filters()\n",
" else:\n",
" raise Exception('invalid argument for \"pygmentsdoc\" directive')\n",
" node = nodes.compound()\n",
" vl = ViewList(out.split('\\n'), source='')\n",
" nested_parse_with_titles(self.state, vl, node)\n",
" for fn in self.filenames:\n",
" self.state.document.settings.record_dependencies.add(fn)\n",
" return node.children\n",
"\n",
" def document_lexers(self):\n",
" from pygments.lexers._mapping import LEXERS\n",
" out = []\n",
" modules = {}\n",
" moduledocstrings = {}\n",
" for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):\n",
" module = data[0]\n",
" mod = __import__(module, None, None, [classname])\n",
" self.filenames.add(mod.__file__)\n",
" cls = getattr(mod, classname)\n",
" if not cls.__doc__:\n",
" print(\"Warning: %s does not have a docstring.\" % classname)\n",
" docstring = cls.__doc__\n",
" if isinstance(docstring, bytes):\n",
" docstring = docstring.decode('utf8')\n",
" modules.setdefault(module, []).append((\n",
" classname,\n",
" ', '.join(data[2]) or 'None',\n",
" ', '.join(data[3]).replace('*', '\\\\*').replace('_', '\\\\') or 'None',\n",
" ', '.join(data[4]) or 'None',\n",
" docstring))\n",
" if module not in moduledocstrings:\n",
" moddoc = mod.__doc__\n",
" if isinstance(moddoc, bytes):\n",
" moddoc = moddoc.decode('utf8')\n",
" moduledocstrings[module] = moddoc\n",
"\n",
" for module, lexers in sorted(modules.items(), key=lambda x: x[0]):\n",
" heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')\n",
" out.append(MODULEDOC % (module, heading, '-'*len(heading)))\n",
" for data in lexers:\n",
" out.append(LEXERDOC % data)\n",
"\n",
" return ''.join(out)\n",
"\n",
" def document_formatters(self):\n",
" from pygments.formatters import FORMATTERS\n",
"\n",
" out = []\n",
" for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):\n",
" module = data[0]\n",
" mod = __import__(module, None, None, [classname])\n",
" self.filenames.add(mod.__file__)\n",
" cls = getattr(mod, classname)\n",
" docstring = cls.__doc__\n",
" if isinstance(docstring, bytes):\n",
" docstring = docstring.decode('utf8')\n",
" heading = cls.__name__\n",
" out.append(FMTERDOC % (heading, ', '.join(data[1]) or 'None',\n",
" ', '.join(data[2]).replace('*', '\\\\*') or 'None',\n",
" docstring))\n",
" return ''.join(out)\n",
"\n",
" def document_filters(self):\n",
" from pygments.filters import FILTERS\n",
"\n",
" out = []\n",
" for name, cls in FILTERS.items():\n",
" self.filenames.add(sys.modules[cls.__module__].__file__)\n",
" docstring = cls.__doc__\n",
" if isinstance(docstring, bytes):\n",
" docstring = docstring.decode('utf8')\n",
" out.append(FILTERDOC % (cls.__name__, name, docstring))\n",
" return ''.join(out)\n",
"\n",
"\n",
"def setup(app):\n",
" app.add_directive('pygmentsdoc', PygmentsDoc)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 155 | 0.000445 |
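The module in the row above only does anything once a Sphinx project loads it: setup() registers the directive under the name 'pygmentsdoc', and run() dispatches on a single required argument ('lexers', 'formatters', or 'filters'). A minimal sketch of enabling it, assuming the module is importable as pygments.sphinxext (the path given in its own docstring); the surrounding project values are placeholders:

# conf.py sketch -- list the extension so Sphinx calls its setup() hook.
extensions = ['pygments.sphinxext']

# A reStructuredText page can then use the directive registered above, e.g.
#
#   .. pygmentsdoc:: lexers
#
# run() documents lexers, formatters, or filters depending on that argument
# and raises an Exception for any other value.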
''' sensor-post reads sensor data from a sql lite database and posts it to the server
Copyright (C) 2013 Sagar G V
E-mail : [email protected]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import time
import urllib2, urllib
import sqlite3 as lite
debug = True
con = lite.connect('temp.db',isolation_level=None)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS SensorData(Id INTEGER PRIMARY KEY, time STRING, sensorid STRING, sensortype STRING, sensordata STRING);")
def read_sensor_data():
s = cur.execute("SELECT Id,time,sensortype,sensorid,sensordata FROM SensorData ORDER BY RANDOM() LIMIT 1;")
if s:
r = s.fetchone()
if r:
id,t,sensortype,sensorid,sensordata = r
return [ str(id), str(sensortype), str(sensorid), str(t), str(sensordata) ]
def delete_sensor_data(id):
cur.execute("DELETE FROM SensorData WHERE Id=%s" % str(id))
def post_reading(reading):
# reading is a tupe like this ( 'sensor_type','sensor_id' , 'time' , 'val_ciphertext' )
data=[('sensorid',reading[1]),('sensortype',reading[0]),('time',reading[2]),('val',reading[3])]
data=urllib.urlencode(data)
path='https://rnicu-web.appspot.com/sensor/update'
req=urllib2.Request(path, data)
req.add_header("Content-type", "application/x-www-form-urlencoded")
try:
response = urllib2.urlopen(req).read()
rcode = int(response[0:2])
if debug:
print response
return rcode
except:
return -1
while True:
r = read_sensor_data()
if r:
ret = post_reading(r[1:])
if ret == 0:
delete_sensor_data(r[0])
else:
if debug:
print ret
time.sleep(5)
time.sleep(1) | [
"'''\tsensor-post reads sensor data from a sql lite database and posts it to the server\n",
"\n",
"\tCopyright (C) 2013 Sagar G V\n",
"\tE-mail : [email protected]\n",
"\n",
" This program is free software: you can redistribute it and/or modify\n",
" it under the terms of the GNU Affero General Public License as published by\n",
" the Free Software Foundation, either version 3 of the License, or\n",
" (at your option) any later version.\n",
"\n",
" This program is distributed in the hope that it will be useful,\n",
" but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
" GNU Affero General Public License for more details.\n",
"\n",
" You should have received a copy of the GNU Affero General Public License\n",
" along with this program. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"'''\n",
"\n",
"import time\n",
"import urllib2, urllib\n",
"import sqlite3 as lite\n",
"\n",
"debug = True\n",
"\n",
"con = lite.connect('temp.db',isolation_level=None)\n",
"cur = con.cursor()\n",
"cur.execute(\"CREATE TABLE IF NOT EXISTS SensorData(Id INTEGER PRIMARY KEY, time STRING, sensorid STRING, sensortype STRING, sensordata STRING);\")\n",
"\n",
"def read_sensor_data():\n",
"\ts = cur.execute(\"SELECT Id,time,sensortype,sensorid,sensordata FROM SensorData ORDER BY RANDOM() LIMIT 1;\")\n",
"\tif s:\n",
"\t\tr = s.fetchone()\n",
"\t\tif r:\n",
"\t\t\tid,t,sensortype,sensorid,sensordata = r\n",
"\t\t\treturn [ str(id), str(sensortype), str(sensorid), str(t), str(sensordata) ]\n",
"\n",
"def delete_sensor_data(id):\n",
"\tcur.execute(\"DELETE FROM SensorData WHERE Id=%s\" % str(id))\n",
"\n",
"def post_reading(reading):\n",
"\t# reading is a tupe like this ( 'sensor_type','sensor_id' , 'time' , 'val_ciphertext' )\n",
"\n",
"\tdata=[('sensorid',reading[1]),('sensortype',reading[0]),('time',reading[2]),('val',reading[3])]\n",
"\tdata=urllib.urlencode(data)\n",
"\tpath='https://rnicu-web.appspot.com/sensor/update'\n",
"\treq=urllib2.Request(path, data)\n",
"\treq.add_header(\"Content-type\", \"application/x-www-form-urlencoded\")\n",
"\n",
"\ttry:\n",
"\t\tresponse = urllib2.urlopen(req).read()\n",
"\t\trcode = int(response[0:2])\n",
"\t\tif debug:\n",
"\t\t\tprint response\n",
"\t\treturn rcode\n",
"\texcept:\n",
"\t\treturn -1\n",
"\n",
"while True:\n",
"\tr = read_sensor_data()\n",
"\tif r:\n",
"\t\tret = post_reading(r[1:])\n",
"\t\tif ret == 0:\n",
"\t\t\tdelete_sensor_data(r[0])\n",
"\t\telse:\n",
"\t\t\tif debug:\n",
"\t\t\t\tprint ret\n",
"\t\t\ttime.sleep(5)\n",
"\ttime.sleep(1)"
] | [
0.011627906976744186,
0,
0.03225806451612903,
0.029411764705882353,
0,
0.0136986301369863,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0.0196078431372549,
0,
0.00684931506849315,
0,
0.041666666666666664,
0.027522935779816515,
0.14285714285714285,
0.05263157894736842,
0.125,
0.11627906976744186,
0.0379746835443038,
0,
0.03571428571428571,
0.01639344262295082,
0,
0.037037037037037035,
0.022222222222222223,
0,
0.10309278350515463,
0.06896551724137931,
0.038461538461538464,
0.06060606060606061,
0.014492753623188406,
0,
0.16666666666666666,
0.024390243902439025,
0.034482758620689655,
0.08333333333333333,
0.05555555555555555,
0.06666666666666667,
0.2222222222222222,
0.08333333333333333,
0,
0.08333333333333333,
0.041666666666666664,
0.14285714285714285,
0.03571428571428571,
0.06666666666666667,
0.03571428571428571,
0.125,
0.07692307692307693,
0.07142857142857142,
0.058823529411764705,
0.14285714285714285
] | 70 | 0.038793 |
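The polling loop in the row above only consumes rows, so something else has to populate temp.db using the schema created at the top of the script. A minimal producer sketch that inserts one row for the loop to pick up; the timestamp, sensor id, sensor type, and payload below are made-up placeholder values:

# Write one row into the same SQLite table the sensor-post loop polls.
# Id is omitted so SQLite assigns the INTEGER PRIMARY KEY automatically.
import sqlite3

con = sqlite3.connect('temp.db', isolation_level=None)
con.execute("CREATE TABLE IF NOT EXISTS SensorData(Id INTEGER PRIMARY KEY, "
            "time STRING, sensorid STRING, sensortype STRING, sensordata STRING);")
con.execute("INSERT INTO SensorData(time, sensorid, sensortype, sensordata) "
            "VALUES (?, ?, ?, ?)",
            ("2013-06-01 12:00:00", "pulse-ox-01", "spo2", "<ciphertext>"))
con.close()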