| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12 to 1.05M | stringlengths 5 to 86 | stringlengths 4 to 191 | stringclasses (1 value) | stringclasses (15 values) | int32 12 to 1.05M | sequencelengths 1 to 23 | stringlengths 64 to 64 |
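A minimal sketch of how rows with this schema could be inspected, assuming the table has been exported to a JSON-lines file (the filename and the column subset below are illustrative, not part of the dataset):

import pandas as pd

# Each record carries the raw source ("text") plus provenance and keyword metadata.
rows = pd.read_json("rows.jsonl", lines=True)
print(rows[["repo_name", "path", "license", "keyword", "size"]].head())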
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculate properties for electrons."""
import logging
import numpy
from .calculationmethod import Method
class Electrons(Method):
"""A container for methods pertaining to electrons."""
def __init__(self, data, progress=None, loglevel=logging.INFO, logname="Log"):
super(Electrons, self).__init__(data, progress, loglevel, logname)
def __str__(self):
"""Returns a string representation of the object."""
return "Electrons"
def __repr__(self):
"""Returns a representation of the object."""
return "Electrons"
def count(self, core=False):
"""Returns the electron count in system.
Normally returns electrons used in calculation, but will include
core electrons in pseudopotentials if core is True.
"""
nelectrons = sum(self.data.atomnos) - self.data.charge
if core:
nelectrons += sum(self.data.coreelectrons)
return nelectrons
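# Minimal usage sketch (not part of the original module): parse a quantum
# chemistry logfile with cclib's ccopen and count electrons. The filename is
# hypothetical.
#   from cclib.parser import ccopen
#   data = ccopen("water_dft.log").parse()
#   print(Electrons(data).count())            # electrons used in the calculation
#   print(Electrons(data).count(core=True))   # also count ECP core electrons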
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| Schamnad/cclib | src/cclib/method/electrons.py | Python | bsd-3-clause | 1,269 | [
"cclib"
] | 0e0e7a99eb80412404f2e612e85c94144c7b061e6d2c06333c59aa99bab95612 |
# imports
import pandas as pd
import numpy as np
import time
import os
from tabulate import tabulate
import sys
from operator import add
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import SQLContext
from pyspark.sql import functions as F #https://stackoverflow.com/questions/39504950/python-pyspark-get-sum-of-a-pyspark-dataframe-column-values
sys.path.append('..')
sys.path.append('..')
from get_type_lists import get_type_lists
from target_encoder import target_encoder
from feature_combiner import feature_combiner
from logging_lib.LoggingController import LoggingController
sys.path.remove('..')
sys.path.remove('..')
#Define your s3 bucket to load and store data
S3_BUCKET = 'emr-related-files'
#Create a custom logger to log statistics and plots
logger = LoggingController()
logger.s3_bucket = S3_BUCKET
#.config('spark.executor.cores','6') \
spark = SparkSession.builder \
.appName("App") \
.getOrCreate()
# .master("local[*]") \
# .config('spark.cores.max','16')
#.master("local") \
# .config("spark.some.config.option", "some-value") \
spark.sparkContext.setLogLevel('WARN') #Get rid of all the junk in output
Y = 'y'
ID_VAR = 'ID'
DROPS = [ID_VAR]
#From an XGBoost model
# NOTE the top 6 are categorical, might want to look into this.
MOST_IMPORTANT_VARS_ORDERD = ['X5','X0','X8','X3','X1','X2','X314','X47','X118',\
'X315','X29','X127','X236','X115','X383','X152','X151','X351','X327','X77','X104',\
'X267','X95','X142']
#Load data from s3
train = spark.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('s3n://emr-related-files/train.csv')
test = spark.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('s3n://emr-related-files/test.csv')
#this needs to be done for h2o glm.predict() bug (which needs same number of columns)
test = test.withColumn(Y,test[ID_VAR])
#Workaround for splitting wide data: split on only the ID variable,
#then join back with the train frame (bug in Spark as of 2.1 with randomSplit())
(train1,valid1) = train.select(ID_VAR).randomSplit([0.7,0.3], seed=123)
valid = valid1.join(train, ID_VAR,'inner')
train = train1.join(train,ID_VAR,'inner')
# print('TRAIN DATA')
# train.show(2)
# print('VALID DATA')
# valid.show(2)
#workaround for h2o predict
test1 = test.select(ID_VAR,Y)
test2 = test.drop(Y)
test = test1.join(test2,ID_VAR,'inner')
original_nums, cats = get_type_lists(frame=train,rejects=[ID_VAR,Y],frame_type='spark')
print("Encoding numberic variables...")
training_df_list, test_df_list,valid_df_list = list(),list(),list()
for i, var in enumerate(cats):
total = len(cats)
print('Encoding: ' + var + ' (' + str(i+1) + '/' + str(total) + ') ...')
logger.log_string('Encoding: ' + var + ' (' + str(i+1) + '/' + str(total) + ') ...')
tr_enc,v_enc, ts_enc = target_encoder(train, test, var, Y,valid_frame=valid,frame_type='spark',id_col=ID_VAR)
training_df_list.append(tr_enc)
test_df_list.append(ts_enc)
valid_df_list.append(v_enc)
#join all the new variables
for i, df in enumerate(training_df_list):
train = train.join(training_df_list[i],ID_VAR,'inner')
valid = valid.join(valid_df_list[i],ID_VAR,'inner')
test = test.join(test_df_list[i],ID_VAR,'inner')
# print('TRAIN DATA')
# train.show(2)
# print('VALID DATA')
# valid.show(2)
# print('TEST DATA')
# test.show(2)
print('Done encoding.')
encoded_nums, cats = get_type_lists(frame=train,rejects=[ID_VAR,Y],frame_type='spark')
#Replace cats in MOST_IMPORTANT_VARS_ORDERD with their target-encoded column names
for i, v in enumerate(MOST_IMPORTANT_VARS_ORDERD):
if v in cats:
MOST_IMPORTANT_VARS_ORDERD[i] = v + '_Tencode'
print('Combining features....')
(train, valid, test) = feature_combiner(train, test, MOST_IMPORTANT_VARS_ORDERD, valid_frame = valid, frame_type='spark')
print('Done combining features.')
encoded_combined_nums, cats = get_type_lists(frame=train,rejects=[ID_VAR,Y],frame_type='spark')
################################################################################
# DONE WITH PREPROCESSING - START TRAINING #
################################################################################
import h2o
h2o.show_progress() # turn on progress bars
from h2o.estimators.glm import H2OGeneralizedLinearEstimator # import GLM models
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator # used by gboosting_grid below
from h2o.grid.grid_search import H2OGridSearch # grid search
import matplotlib
matplotlib.use('Agg') #Need this if running matplot on a server w/o display
from pysparkling import *
conf = H2OConf(spark=spark)
conf.nthreads = -1
hc = H2OContext.getOrCreate(spark,conf)
print('Making h2o frames...')
trainHF = hc.as_h2o_frame(train, "trainTable")
validHF = hc.as_h2o_frame(valid, "validTable")
testHF = hc.as_h2o_frame(test, "testTable")
print('Done making h2o frames.')
logger.log_string("Train Summary:")
logger.log_string("Rows:{}".format(trainHF.nrow))
logger.log_string("Cols:{}".format(trainHF.ncol))
# print(trainHF.summary(return_data=True))
# logger.log_string(tabulate(trainHF.summary(return_data=True),tablefmt="grid"))
# logger.log_string(trainHF._ex._cache._tabulate('grid',False))
base_train, stack_train = trainHF.split_frame([0.5], seed=12345)
base_valid, stack_valid = validHF.split_frame([0.5], seed=12345)
# def upload_submission(sub,predict_column='predict'):
# # create time stamp
# import re
# import time
# time_stamp = re.sub('[: ]', '_', time.asctime())
#
# # save file for submission
# # sub.columns = [ID_VAR, Y]
# sub_fname = 'Submission_'+str(time_stamp) + '.csv'
# # h2o.download_csv(sub, 's3n://'+S3_BUCKET+'/kaggle_submissions/Mercedes/' +sub_fname)
#
# spark_sub_frame = hc.as_spark_frame(sub)
#
# spark_sub_frame.select(ID_VAR,predict_column).coalesce(1).write.option("header","true").csv('s3n://'+S3_BUCKET+'/Kaggle_Submissions/Mercedes/' +sub_fname)
def glm_grid(X, y, train, valid, should_submit = False):
""" Wrapper function for penalized GLM with alpha and lambda search.
:param X: List of inputs.
:param y: Name of target variable.
:param train: Name of training H2OFrame.
:param valid: Name of validation H2OFrame.
:return: Best H2Omodel from H2OGeneralizedLinearEstimator
"""
alpha_opts = [0.01, 0.25, 0.5, 0.99] # always keep some L2
family = ["gaussian", "binomial", "quasibinomial", "multinomial", "poisson", "gamma", "tweedie"]
hyper_parameters = {"alpha":alpha_opts
}
# initialize grid search
grid = H2OGridSearch(
H2OGeneralizedLinearEstimator(
family="gaussian",
lambda_search=True,
seed=12345),
hyper_params=hyper_parameters)
# train grid
grid.train(y=y,
x=X,
training_frame=train,
validation_frame=valid)
# show grid search results
print(grid.show())
best = grid.get_grid()[0]
print(best)
# if should_submit:
# sub_frame = testHF[ID_VAR].cbind(best.predict(testHF))
# print(sub_frame.col_names)
# print('Submission frame preview:')
# print(sub_frame[0:10, [ID_VAR, 'predict']])
# upload_submission(sub_frame,'predict')
# plot top frame values
print('yhat_frame')
yhat_frame = valid.cbind(best.predict(valid))
print(yhat_frame[0:10, [y, 'predict']])
# plot sorted predictions
yhat_frame_df = yhat_frame[[y, 'predict']].as_data_frame()
yhat_frame_df.sort_values(by='predict', inplace=True)
yhat_frame_df.reset_index(inplace=True, drop=True)
plt = yhat_frame_df.plot(title='Ranked Predictions Plot')
logger.log_string('Ranked Predictions Plot')
logger.log_matplotlib_plot(plt)
# select best model
return best
def neural_net_grid(X, y, train, valid):
# define random grid search parameters
hyper_parameters = {'hidden': [[170, 320], [80, 190], [320, 160, 80], [100], [50, 50, 50, 50]],
'l1':[s/1e4 for s in range(0, 1000, 100)],
'l2':[s/1e5 for s in range(0, 1000, 100)],
'input_dropout_ratio':[s/1e2 for s in range(0, 20, 2)]}
# define search strategy
search_criteria = {'strategy':'RandomDiscrete',
'max_models':100,
'max_runtime_secs':60*60*2, #2 hours
}
# initialize grid search
gsearch = H2OGridSearch(H2ODeepLearningEstimator,
hyper_params=hyper_parameters,
search_criteria=search_criteria)
# execute training w/ grid search
gsearch.train(x=X,
y=y,
training_frame=train,
validation_frame=valid,
activation='TanhWithDropout',
epochs=2000,
stopping_rounds=20,
sparse=True, # handles data w/ many zeros more efficiently
ignore_const_cols=True,
adaptive_rate=True)
best_model = gsearch.get_grid()[0]
return best_model
def gboosting_grid(X, y, train, valid):
# define random grid search parameters
hyper_parameters = {'ntrees':list(range(0, 500, 50)),
'max_depth':list(range(0, 20, 2)),
'sample_rate':[s/float(10) for s in range(1, 11)],
'col_sample_rate':[s/float(10) for s in range(1, 11)]}
# define search strategy
search_criteria = {'strategy':'RandomDiscrete',
'max_models':100,
'max_runtime_secs':60*60*2, #2 hours
}
# initialize grid search
gsearch = H2OGridSearch(H2OGradientBoostingEstimator,
hyper_params=hyper_parameters,
search_criteria=search_criteria)
# execute training w/ grid search
gsearch.train(x=X,
y=y,
training_frame=train,
validation_frame=valid)
best_model = gsearch.get_grid()[0]
return best_model
print('Training..')
logger.log_string('glm0')
glm0 = glm_grid(original_nums, Y, base_train, base_valid)
logger.log_string('glm1')
glm1 = glm_grid(encoded_nums, Y, base_train, base_valid)
logger.log_string('glm2')
glm2 = glm_grid(encoded_combined_nums, Y, base_train, base_valid)
# logger.log_string('rnn0')
# rnn0 = neural_net_grid(original_nums, Y, base_train, base_valid)
# logger.log_string('rnn1')
# rnn1 = neural_net_grid(encoded_nums, Y, base_train, base_valid)
# logger.log_string('rnn2')
# rnn2 = neural_net_grid(encoded_combined_nums, Y, base_train, base_valid)
#
# logger.log_string('gbm0')
# gbm0 = gboosting_grid(original_nums, Y, base_train, base_valid)
# logger.log_string('gbm1')
# gbm1 = gboosting_grid(encoded_nums, Y, base_train, base_valid)
# logger.log_string('gbm2')
# gbm2 = gboosting_grid(encoded_combined_nums, Y, base_train, base_valid)
print('DONE training.')
stack_train = stack_train.cbind(glm0.predict(stack_train))
stack_valid = stack_valid.cbind(glm0.predict(stack_valid))
stack_train = stack_train.cbind(glm1.predict(stack_train))
stack_valid = stack_valid.cbind(glm1.predict(stack_valid))
stack_train = stack_train.cbind(glm2.predict(stack_train))
stack_valid = stack_valid.cbind(glm2.predict(stack_valid))
# stack_train = stack_train.cbind(rnn0.predict(stack_train))
# stack_valid = stack_valid.cbind(rnn0.predict(stack_valid))
# stack_train = stack_train.cbind(rnn1.predict(stack_train))
# stack_valid = stack_valid.cbind(rnn1.predict(stack_valid))
# stack_train = stack_train.cbind(rnn2.predict(stack_train))
# stack_valid = stack_valid.cbind(rnn2.predict(stack_valid))
#
# stack_train = stack_train.cbind(gbm0.predict(stack_train))
# stack_valid = stack_valid.cbind(gbm0.predict(stack_valid))
# stack_train = stack_train.cbind(gbm1.predict(stack_train))
# stack_valid = stack_valid.cbind(gbm1.predict(stack_valid))
# stack_train = stack_train.cbind(gbm2.predict(stack_train))
# stack_valid = stack_valid.cbind(gbm2.predict(stack_valid))
testHF = testHF.cbind(glm0.predict(testHF))
testHF = testHF.cbind(glm1.predict(testHF))
testHF = testHF.cbind(glm2.predict(testHF))
# testHF = testHF.cbind(rnn0.predict(testHF))
# testHF = testHF.cbind(rnn1.predict(testHF))
# testHF = testHF.cbind(rnn2.predict(testHF))
# testHF = testHF.cbind(gbm0.predict(testHF))
# testHF = testHF.cbind(gbm1.predict(testHF))
# testHF = testHF.cbind(gbm2.predict(testHF))
logger.log_string('glm3')
glm3 = glm_grid(encoded_combined_nums + ['predict', 'predict0','predict1'], Y, stack_train, stack_valid, should_submit=True)
# rnn = neural_net_grid(MOST_IMPORTANT_VARS_ORDERD + ['predict', 'predict0', 'predict1'], Y, stack_train, stack_valid)
sub = testHF[ID_VAR].cbind(glm3.predict(testHF)) # use the stacked GLM (glm3); the rnn model is commented out above
print(sub.head())
# create time stamp
import re
import time
time_stamp = re.sub('[: ]', '_', time.asctime())
# save file for submission
sub.columns = [ID_VAR, Y]
sub_fname = 'Submission_'+str(time_stamp) + '.csv'
# h2o.download_csv(sub, 's3n://'+S3_BUCKET+'/kaggle_submissions/Mercedes/' +sub_fname)
spark_sub_frame = hc.as_spark_frame(sub)
spark_sub_frame.select(ID_VAR,Y).coalesce(1).write.option("header","true").csv('s3n://'+S3_BUCKET+'/Kaggle_Submissions/Mercedes/' +sub_fname)
| kcrandall/Kaggle_Mercedes_Manufacturing | spark/experiements/niki/experiment1.py | Python | mit | 13,526 | [
"Gaussian"
] | 134186991391b4e6bcb186c08d941642a13f9a1ab661e6f36bfc2c635ed7e35b |
"""
Support for cftime axis in matplotlib.
"""
import warnings
import cftime
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
import numpy as np
from ._version import version as __version__ # noqa: F401
_DEFAULT_RESOLUTION = "DAILY"
_TIME_UNITS = "days since 2000-01-01"
class CalendarDateTime:
"""
Container for a :py:class:`cftime.datetime` object and calendar.
Parameters
----------
datetime : :py:class:`cftime.datetime`
The datetime instance associated with this
:py:class:`CalendarDateTime` object.
calendar : str
The calendar type of the datetime object, e.g. ``"noleap"``. See
:py:class:`cftime.datetime` documentation for a full list of valid
calendar strings.
Notes
-----
This class is no longer needed and will be deprecated in nc-time-axis
version 1.5.
"""
def __init__(self, datetime, calendar):
warnings.warn(
"CalendarDateTime is obsolete and will be deprecated in nc_time_axis "
"version 1.5. Please consider switching to plotting instances or "
"subclasses of cftime.datetime directly.",
DeprecationWarning,
)
self.datetime = datetime
self.calendar = calendar
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.datetime == other.datetime
and self.calendar == other.calendar
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return (
f"<{type(self).__name__}: datetime={self.datetime}, "
"calendar={self.calendar}>"
)
_RESOLUTION_TO_FORMAT = {
"SECONDLY": "%H:%M:%S",
"MINUTELY": "%H:%M",
"HOURLY": "%Y-%m-%d %H:%M",
"DAILY": "%Y-%m-%d",
"MONTHLY": "%Y-%m",
"YEARLY": "%Y",
}
class AutoCFTimeFormatter(mticker.Formatter):
"""
Automatic formatter for :py:class:`cftime.datetime` data.
    Automatically chooses a date format based on the resolution set by the
    :py:class:`NetCDFTimeDateLocator`. If no resolution is set, a default
    format of ``"%Y-%m-%d"`` is used.
Parameters
----------
    locator : NetCDFTimeDateLocator
The locator to be associated with this formatter.
calendar : str
The calendar type of the axis, e.g. ``"noleap"``. See the
:py:class:`cftime.datetime` documentation for a full list of valid
calendar strings.
time_units : str, optional
The time units the numeric tick values represent. Note this will
be deprecated in nc-time-axis version 1.5.
"""
def __init__(self, locator, calendar, time_units=None):
#: The locator associated with this formatter. This is used to get hold
#: of the scaling information.
self.locator = locator
self.calendar = calendar
if time_units is not None:
warnings.warn(
"The time_units argument will be removed in nc_time_axis "
"version 1.5",
DeprecationWarning,
)
self.time_units = time_units
else:
self.time_units = _TIME_UNITS
def pick_format(self, resolution):
return _RESOLUTION_TO_FORMAT[resolution]
def __call__(self, x, pos=0):
format_string = self.pick_format(self.locator.resolution)
dt = cftime.num2date(x, self.time_units, calendar=self.calendar)
return dt.strftime(format_string)
class NetCDFTimeDateFormatter(AutoCFTimeFormatter):
def __init__(self, *args, **kwargs):
warnings.warn(
"NetCDFTimeDateFormatter will be named AutoCFTimeFormatter "
"in nc_time_axis version 1.5",
FutureWarning,
)
super(NetCDFTimeDateFormatter, self).__init__(*args, **kwargs)
class CFTimeFormatter(mticker.Formatter):
"""
A formatter for explicitly setting the format of a
:py:class:`cftime.datetime` axis.
Parameters
----------
    format : str
        Format string that can be passed to cftime.datetime.strftime,
        e.g. ``"%Y-%m-%d"``. See `the Python documentation
        <https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes>`_
        for acceptable format codes.
calendar : str
The calendar type of the axis, e.g. ``"noleap"``. See the
:py:class:`cftime.datetime` documentation for a full list of valid
calendar strings.
"""
def __init__(self, format, calendar):
self.format = format
self.calendar = calendar
def __call__(self, x, pos=0):
dt = cftime.num2date(x, _TIME_UNITS, calendar=self.calendar)
return dt.strftime(self.format)
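# Minimal usage sketch (not part of the original module): attach the fixed-format
# formatter to an existing matplotlib Axes `ax` whose x values are cftime dates
# in a "noleap" calendar.
#   ax.xaxis.set_major_formatter(CFTimeFormatter("%Y-%m-%d", "noleap"))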
class NetCDFTimeDateLocator(mticker.Locator):
"""
Determines tick locations when plotting :py:class:`cftime.datetime` data.
Parameters
----------
max_n_ticks : int
The maximum number of ticks along the axis. This is passed internally
to a :py:class:`matplotlib.ticker.MaxNLocator` class.
calendar : str
The calendar type of the axis, e.g. ``"noleap"``. See the
:py:class:`cftime.datetime` documentation for a full list of valid
calendar strings.
date_unit : str
The time units the numeric tick values represent. Note this will
be deprecated in nc-time-axis version 1.5.
min_n_ticks : int, default 3
The minimum number of ticks along the axis. Note this is currently
not used.
"""
real_world_calendars = (
"gregorian",
"julian",
"proleptic_gregorian",
"standard",
)
def __init__(self, max_n_ticks, calendar, date_unit=None, min_n_ticks=3):
# The date unit must be in the form of days since ...
self.max_n_ticks = max_n_ticks
self.min_n_ticks = min_n_ticks
self._max_n_locator = mticker.MaxNLocator(max_n_ticks, integer=True)
self._max_n_locator_days = mticker.MaxNLocator(
max_n_ticks, integer=True, steps=[1, 2, 4, 7, 10]
)
self.calendar = calendar
if date_unit is not None:
warnings.warn(
"The date_unit argument will be removed in "
"nc_time_axis version 1.5",
DeprecationWarning,
)
self.date_unit = date_unit
else:
self.date_unit = _TIME_UNITS
if not self.date_unit.lower().startswith("days since"):
emsg = (
"The date unit must be days since for a NetCDF "
"time locator."
)
raise ValueError(emsg)
self.resolution = _DEFAULT_RESOLUTION
self._cached_resolution = {}
def compute_resolution(self, num1, num2, date1, date2):
"""
Returns the resolution of the dates (hourly, minutely, yearly), and
an **approximate** number of those units.
"""
num_days = float(np.abs(num1 - num2))
resolution = "SECONDLY"
n = mdates.SEC_PER_DAY
if num_days * mdates.MINUTES_PER_DAY > self.max_n_ticks:
resolution = "MINUTELY"
n = int(num_days / mdates.MINUTES_PER_DAY)
if num_days * mdates.HOURS_PER_DAY > self.max_n_ticks:
resolution = "HOURLY"
n = int(num_days / mdates.HOURS_PER_DAY)
if num_days > self.max_n_ticks:
resolution = "DAILY"
n = int(num_days)
if num_days > 30 * self.max_n_ticks:
resolution = "MONTHLY"
n = num_days // 30
if num_days > 365 * self.max_n_ticks:
resolution = "YEARLY"
n = abs(date1.year - date2.year)
self.resolution = resolution
return resolution, n
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(
vmin, vmax, expander=1e-7, tiny=1e-13
)
lower = cftime.num2date(vmin, self.date_unit, calendar=self.calendar)
upper = cftime.num2date(vmax, self.date_unit, calendar=self.calendar)
resolution, n = self.compute_resolution(vmin, vmax, lower, upper)
def has_year_zero(year):
result = dict()
if self.calendar in self.real_world_calendars and not bool(year):
result = dict(has_year_zero=True)
return result
if resolution == "YEARLY":
            # TODO START AT THE BEGINNING OF A DECADE/CENTURY/MILLENNIUM as
# appropriate.
years = self._max_n_locator.tick_values(lower.year, upper.year)
ticks = [
cftime.datetime(
int(year),
1,
1,
calendar=self.calendar,
**has_year_zero(year),
)
for year in years
]
elif resolution == "MONTHLY":
            # TODO START AT THE BEGINNING OF A DECADE/CENTURY/MILLENNIUM as
# appropriate.
months_offset = self._max_n_locator.tick_values(0, n)
ticks = []
for offset in months_offset:
year = lower.year + np.floor((lower.month + offset) / 12)
month = ((lower.month + offset) % 12) + 1
dt = cftime.datetime(
int(year),
int(month),
1,
calendar=self.calendar,
**has_year_zero(year),
)
ticks.append(dt)
elif resolution == "DAILY":
# TODO: It would be great if this favoured multiples of 7.
days = self._max_n_locator_days.tick_values(vmin, vmax)
ticks = [
cftime.num2date(dt, self.date_unit, calendar=self.calendar)
for dt in days
]
elif resolution == "HOURLY":
hour_unit = "hours since 2000-01-01"
in_hours = cftime.date2num(
[lower, upper], hour_unit, calendar=self.calendar
)
hours = self._max_n_locator.tick_values(in_hours[0], in_hours[1])
ticks = [
cftime.num2date(dt, hour_unit, calendar=self.calendar)
for dt in hours
]
elif resolution == "MINUTELY":
minute_unit = "minutes since 2000-01-01"
in_minutes = cftime.date2num(
[lower, upper], minute_unit, calendar=self.calendar
)
minutes = self._max_n_locator.tick_values(
in_minutes[0], in_minutes[1]
)
ticks = [
cftime.num2date(dt, minute_unit, calendar=self.calendar)
for dt in minutes
]
elif resolution == "SECONDLY":
second_unit = "seconds since 2000-01-01"
in_seconds = cftime.date2num(
[lower, upper], second_unit, calendar=self.calendar
)
seconds = self._max_n_locator.tick_values(
in_seconds[0], in_seconds[1]
)
ticks = [
cftime.num2date(dt, second_unit, calendar=self.calendar)
for dt in seconds
]
else:
emsg = f"Resolution {resolution} not implemented yet."
raise ValueError(emsg)
        # Some calendars do not allow a year 0.
# Remove ticks to avoid raising an error.
if self.calendar in [
"proleptic_gregorian",
"gregorian",
"julian",
"standard",
]:
ticks = [t for t in ticks if t.year != 0]
return cftime.date2num(ticks, self.date_unit, calendar=self.calendar)
class NetCDFTimeConverter(mdates.DateConverter):
"""
Converter for :py:class:`cftime.datetime` data.
"""
standard_unit = "days since 2000-01-01"
@staticmethod
def axisinfo(unit, axis):
"""
Returns the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
calendar, date_unit, date_type = unit
majloc = NetCDFTimeDateLocator(4, calendar=calendar)
majfmt = AutoCFTimeFormatter(majloc, calendar=calendar)
if date_type is CalendarDateTime:
datemin = CalendarDateTime(
cftime.datetime(2000, 1, 1), calendar=calendar
)
datemax = CalendarDateTime(
cftime.datetime(2010, 1, 1), calendar=calendar
)
else:
datemin = date_type(2000, 1, 1)
datemax = date_type(2010, 1, 1)
return munits.AxisInfo(
majloc=majloc,
majfmt=majfmt,
label="",
default_limits=(datemin, datemax),
)
@classmethod
def default_units(cls, sample_point, axis):
"""
Computes some units for the given data point.
"""
if hasattr(sample_point, "__iter__"):
# Deal with nD `sample_point` arrays.
if isinstance(sample_point, np.ndarray):
sample_point = sample_point.reshape(-1)
calendars = np.array([point.calendar for point in sample_point])
if np.all(calendars == calendars[0]):
calendar = calendars[0]
else:
raise ValueError("Calendar units are not all equal.")
date_type = type(sample_point[0])
else:
# Deal with a single `sample_point` value.
if not hasattr(sample_point, "calendar"):
msg = (
"Expecting cftimes with an extra " '"calendar" attribute.'
)
raise ValueError(msg)
else:
calendar = sample_point.calendar
date_type = type(sample_point)
if calendar == "":
raise ValueError(
"A calendar must be defined to plot dates using a cftime axis."
)
return calendar, _TIME_UNITS, date_type
@classmethod
def convert(cls, value, unit, axis):
"""
Converts value, if it is not already a number or sequence of numbers,
with :py:func:`cftime.date2num`.
"""
shape = None
if isinstance(value, np.ndarray):
# Don't do anything with numeric types.
if value.dtype != object:
return value
shape = value.shape
value = value.reshape(-1)
first_value = value[0]
else:
# Don't do anything with numeric types.
if munits.ConversionInterface.is_numlike(value):
return value
# Not an array but a list of non-numerical types (thus assuming datetime types)
elif isinstance(value, (list, tuple)):
first_value = value[0]
else:
# Neither numerical, list or ndarray : must be a datetime scalar.
first_value = value
if not isinstance(first_value, (CalendarDateTime, cftime.datetime)):
raise ValueError(
"The values must be numbers or instances of "
'"nc_time_axis.CalendarDateTime" or '
'"cftime.datetime".'
)
if isinstance(first_value, CalendarDateTime):
if not isinstance(first_value.datetime, cftime.datetime):
raise ValueError(
"The datetime attribute of the "
"CalendarDateTime object must be of type "
"`cftime.datetime`."
)
if isinstance(first_value, CalendarDateTime):
if isinstance(value, (np.ndarray, list, tuple)):
value = [v.datetime for v in value]
else:
value = value.datetime
result = cftime.date2num(
value, _TIME_UNITS, calendar=first_value.calendar
)
if shape is not None:
result = result.reshape(shape)
return result
# Automatically register NetCDFTimeConverter with matplotlib.unit's converter
# dictionary.
if CalendarDateTime not in munits.registry:
munits.registry[CalendarDateTime] = NetCDFTimeConverter()
CFTIME_TYPES = [
cftime.datetime,
cftime.DatetimeNoLeap,
cftime.DatetimeAllLeap,
cftime.DatetimeProlepticGregorian,
cftime.DatetimeGregorian,
cftime.Datetime360Day,
cftime.DatetimeJulian,
]
for date_type in CFTIME_TYPES:
if date_type not in munits.registry:
munits.registry[date_type] = NetCDFTimeConverter()
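# Minimal usage sketch (not part of the original module): once nc_time_axis has
# been imported, the converters registered above let matplotlib plot cftime
# datetimes directly.
#   import matplotlib.pyplot as plt
#   times = [cftime.DatetimeNoLeap(2000, month, 1) for month in range(1, 13)]
#   plt.plot(times, range(12))  # tick locations and labels come from the classes above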
| SciTools/nc-time-axis | nc_time_axis/__init__.py | Python | bsd-3-clause | 16,851 | [
"NetCDF"
] | 50591a061d00248a39fc48c3fe6da6c208374e4f536f5643b6f56a94dffa9654 |
#!/usr/bin/env python
"""
Reads a structure from a Gaussian Log file and other parameters from a
Gaussian Com file and creates a new Gaussian Com.
"""
import argparse
import sys
import os
# qt_scripts modules
from omg.gaussian.gaussian import GaussianLog
from omg.gaussian.gaussian import GaussianCom
from omg.misc import increment_filename
def get_args():
"Parse arguments of gau_log2com.py"
parser = argparse.ArgumentParser(
description="""
Reads a structure from a Gaussian Log file and other parameters from a
Gaussian Com file and creates a new Gaussian Com.""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('log', help='gaussian log filename')
parser.add_argument('-s', '--scan_step',
help='scan step (default = last = 0)',
default=0,
type=int)
parser.add_argument('-o', '--opt_step',
help='opt step (default = last = 0)',
default=0,
type=int)
parser.add_argument('--new_com',
help='new com name (default = increment template com)',
default='')
parser.add_argument('--template_com',
help='(default = same name as .log)',
default='')
args = parser.parse_args()
if args.template_com == '':
args.template_com = os.path.splitext(args.log)[0] + '.com'
if args.new_com == '':
args.new_com = increment_filename(args.template_com)
if os.path.exists(args.new_com):
sys.stderr.write(
'{0} already exists. Aborting.\n'.format(args.new_com))
sys.exit(2)
args.scan_step -= 1
args.opt_step -= 1
return args
def main():
"""
Reads a structure from a Gaussian Log file and other parameters from a
Gaussian Com file and creates a new Gaussian Com.
"""
args = get_args()
gaulog = GaussianLog(args.log)
gaucom = GaussianCom(args.template_com)
atoms_log_coords = gaulog.read_geometry(args.opt_step, args.scan_step)
for no, atom in enumerate(gaucom.atoms_list):
atom.SetVector(atoms_log_coords[no].GetVector())
gaucom.write_to_file(args.new_com)
if __name__ == "__main__":
main()
| eduardoftoliveira/oniomMacGyver | scripts/gau_log2com.py | Python | gpl-3.0 | 2,333 | [
"Gaussian"
] | af3c45c6af400b52dadf3c510f9e02d9f86395a8d3432e8c663181de12ecb549 |
"""Script to combine ReferenceGenomes into a single one.
"""
import os
from Bio import SeqIO
from Bio.Alphabet.IUPAC import ambiguous_dna
from django.core.exceptions import ObjectDoesNotExist
from main.models import Dataset
from main.models import ReferenceGenome
from utils.import_util import add_dataset_to_entity
from utils import generate_safe_filename_prefix_from_label
DATASET_TO_SEQIO_FORMAT = {
Dataset.TYPE.REFERENCE_GENOME_GENBANK: 'genbank',
Dataset.TYPE.REFERENCE_GENOME_FASTA: 'fasta'
}
def combine_list_allformats(reference_genome_list,
new_ref_genome_label, project):
"""Combine ReferenceGenomes into a new single ReferenceGenome
composed of the component parts.
Args:
reference_genome_list: List of ReferenceGenome objects.
new_ref_genome_label: Label for the new ReferenceGenome.
project: Project to which the new ReferenceGenome will be added.
Returns:
Object with keys:
* is_success
* new_reference_genome (when is_success = True)
* error_msg (when is_success = False)
"""
rg_dataset_list = []
for ref_genome in reference_genome_list:
rg_dataset_tup = None
for dataset_type in [Dataset.TYPE.REFERENCE_GENOME_GENBANK,
Dataset.TYPE.REFERENCE_GENOME_FASTA]:
filter_result = ref_genome.dataset_set.filter(type=dataset_type)
if len(filter_result):
rg_dataset_tup = (ref_genome, filter_result[0])
break
if (not rg_dataset_tup or
not os.path.exists(rg_dataset_tup[1].get_absolute_location())):
return {
'is_success': False,
'error_msg': 'All reference genomes must have an associated \
FASTA or Genbank dataset'
}
else:
rg_dataset_list.append(rg_dataset_tup)
assert len(rg_dataset_list) == len(reference_genome_list)
# Read the datasets into Biopython SeqRecord objects.
rg_seqrecord_list = []
seqrecord_ids = []
for rg, dataset in rg_dataset_list:
with open(dataset.get_absolute_location()) as input_fh:
for record in SeqIO.parse(input_fh,
DATASET_TO_SEQIO_FORMAT[dataset.type]):
rg_seqrecord_list.append((rg,record))
seqrecord_ids.append('_'.join([rg.label[:7], record.id[:8]]))
# If ReferenceGenome label and Chromosome id are the same, there will be
# duplicate seqrecord_ids: resolve by including numeric prefix in id
seq_record_list = []
MAX_LOCUS_NAME_LEN = 16
unique_id_len = len(str(len(seqrecord_ids)))
label_len = (MAX_LOCUS_NAME_LEN - 2 - unique_id_len) / 2
for i,seqrecord_id in enumerate(seqrecord_ids):
rg, seqrecord = rg_seqrecord_list[i]
if seqrecord_ids.count(seqrecord_id) == 1:
unique_seqrecord_id = seqrecord_id
else:
unique_seqrecord_id = '_'.join(
[str(i), rg.label[:label_len], seqrecord.id[:label_len]])
seqrecord.name = seqrecord.id = unique_seqrecord_id
seqrecord.seq.alphabet = ambiguous_dna
seq_record_list.append(seqrecord)
# Create a new ReferenceGenome.
new_ref_genome = ReferenceGenome.objects.create(
project=project,
label=new_ref_genome_label,
num_chromosomes=len(seq_record_list),
num_bases=sum([len(seq) for seq in seq_record_list]))
# Generate a filename from the label with non-alphanumeric characters
# replaced by underscores.
filename_prefix = generate_safe_filename_prefix_from_label(
new_ref_genome_label)
does_list_include_genbank = Dataset.TYPE.REFERENCE_GENOME_GENBANK in \
[rg_dataset_tup[1].type for rg_dataset_tup in rg_dataset_list]
if does_list_include_genbank:
filename = filename_prefix + '.gb'
else:
filename = filename_prefix + '.fa'
new_file_dest = os.path.join(new_ref_genome.get_model_data_dir(), filename)
# Write the result.
ref_genome_dataset_type = Dataset.TYPE.REFERENCE_GENOME_GENBANK if \
does_list_include_genbank else Dataset.TYPE.REFERENCE_GENOME_FASTA
output_file_format = DATASET_TO_SEQIO_FORMAT[ref_genome_dataset_type]
with open(new_file_dest, 'w') as output_fh:
SeqIO.write(seq_record_list, output_fh, output_file_format)
# Create a dataset which will point to the file. This step must happen after
# writing the file because a signal will be triggered which requires the
# Genbank to exist already.
add_dataset_to_entity(new_ref_genome, ref_genome_dataset_type,
ref_genome_dataset_type, new_file_dest)
return {
'is_success': True,
'new_reference_genome': new_ref_genome
}
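# Hypothetical usage sketch (not part of the original script; assumes an existing
# Project `project` with at least two ReferenceGenome rows):
#   genomes = list(ReferenceGenome.objects.filter(project=project))
#   result = combine_list_allformats(genomes, 'combined_reference', project)
#   if result['is_success']:
#       print(result['new_reference_genome'].label)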
| woodymit/millstone_accidental_source | genome_designer/utils/combine_reference_genomes.py | Python | mit | 4,829 | [
"Biopython"
] | ae8405f3d11e4fd13e2a593a60bb97d74baa427908c07ad021da1017fc41b0cd |
from chemfiles import Trajectory
file = Trajectory("filename.xyz")
frame = file.read()
print("There are {} atoms in the frame".format(len(frame.atoms)))
positions = frame.positions
# Do awesome science here with the positions
if frame.has_velocities():
velocities = frame.velocities
# If the file contains information about the
# velocities, you will find them here.
| chemfiles/chemfiles.github.io | src/code/example.py | Python | bsd-3-clause | 383 | [
"Chemfiles"
] | d5dd0985ac7e6c2063942a6884bb7084275c904caf55ad797729cc7aff614cee |
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email [email protected]
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: Kadish.py
Age: Kadish Tolesa
Date: February 2003
Event Manager hooks for Kadish Tolesa
"""
from Plasma import *
from PlasmaTypes import *
class Kadish(ptResponder):
def __init__(self):
ptResponder.__init__(self)
self.id = 5207
self.version = 1
def OnFirstUpdate(self):
#~ # record our visit in player's chronicle
#~ kModuleName = "Garden"
#~ kChronicleVarName = "LinksIntoGarden"
#~ kChronicleVarType = 0
#~ vault = ptVault()
#~ if type(vault) != type(None):
#~ entry = vault.findChronicleEntry(kChronicleVarName)
#~ if type(entry) == type(None):
#~ # not found... add current level chronicle
#~ vault.addChronicleEntry(kChronicleVarName,kChronicleVarType,"%d" %(1))
#~ PtDebugPrint("%s:\tentered new chronicle counter %s" % (kModuleName,kChronicleVarName))
#~ else:
#~ import string
#~ count = string.atoi(entry.chronicleGetValue())
#~ count = count + 1
#~ entry.chronicleSetValue("%d" % (count))
#~ entry.save()
#~ PtDebugPrint("%s:\tyour current count for %s is %s" % (kModuleName,kChronicleVarName,entry.chronicleGetValue()))
#~ else:
#~ PtDebugPrint("%s:\tERROR trying to access vault -- can't update %s variable in chronicle." % (kModuleName,kChronicleVarName))
pass
def Load(self):
pass
def OnNotify(self,state,id,events):
pass
| zrax/moul-scripts | Python/Kadish.py | Python | gpl-3.0 | 3,388 | [
"VisIt"
] | a34e91586fc3b62881533cfe5de68d1c76a1faca05879b2c366821cb8af9d664 |
#!/usr/bin/env python2
"""RNAalignment - a module to work with RNA sequence alignments.
To see a full demo of what you can do with this util, please take a look at the Jupyter notebook (https://github.com/mmagnus/rna-pdb-tools/blob/master/rna_tools/tools/rna_alignment/rna_alignment.ipynb)
Load an alignment in the Stockholm::
alignment = ra.RNAalignment('test_data/RF00167.stockholm.sto')
or fasta format::
import rna_alignment as ra
alignment = ra.fasta2stokholm(alignment.fasta)
alignment = ra.RNAalignment
Parameters of the alignment::
print(alignment.describe())
Consensus SS::
print(alignment.ss_cons_with_pk)
Get a sequence (or sequences) from the alignment::
>>> seq = a.io[0]
"""
from Bio import AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Phylo.TreeConstruction import DistanceCalculator
from rna_tools import SecondaryStructure
from rna_tools.rna_tools_config import RCHIE_PATH
from collections import OrderedDict
import Levenshtein
import tempfile
import subprocess
import os
import shutil
import re
import gzip
import copy
class RNAalignmentError(Exception):
pass
class RChieError(Exception):
pass
class RFAMFetchError(Exception):
pass
class RChie:
"""RChie - plotting arc diagrams of RNA secondary structures.
.. image:: ../pngs/rchie.png
http://www.e-rna.org/r-chie/
    The offline version of R-chie requires installing R4RNA first; it can be downloaded from http://www.e-rna.org/r-chie/ or cloned from the git repository (https://github.com/jujubix/r-chie).
How to install it:
- Ensure R is installed already, or download it freely from http://www.r-project.org/
- Download the R4RNA (https://github.com/jujubix/r-chie), open R and install the package::
install.packages("<path_to_file>/R4RNA", repos = NULL, type="source")
# Install the optparse and RColorBrewer
install.packages('optparse')
install.packages('RColorBrewer')
- Go to rna_tools/rna_tools_config_local.py and set RCHIE_PATH to the folder with RChie, e.g. ``"/home/magnus/work/opt/r-chie/"``.
To test if Rchie works on your machine (from rna_align folder)::
<path to your rchie>/rchie.R --msafile test_data/rchie_test_files/fasta.txt test_data/rchie_test_files/helix.txt
you should have rchie.png file in the folder.
More at http://www.e-rna.org/r-chie/download.cgi
Cite: Daniel Lai, Jeff R. Proctor, Jing Yun A. Zhu, and Irmtraud M. Meyer (2012) R-chie: a web server and R package for visualizing RNA secondary structures. Nucleic Acids Research, first published online March 19, 2012. doi:10.1093/nar/gks241
"""
def __init__(self):
pass
def plot_cov(self, seqs, ss_cons, plot_fn='rchie.png', verbose=False):
"""Plot an RChie plot_conv.
:param seqs: seqs in the fasta format
:param ss_cons: a string of secondary structure consensus, use only ``().``. Works with pseuoknots.
"""
fasta_alignment = tempfile.NamedTemporaryFile(delete=False)
with open(fasta_alignment.name, 'w') as f:
f.write(seqs)
plot = tempfile.NamedTemporaryFile(delete=False)
plot.name += '.png'
ss = tempfile.NamedTemporaryFile(delete=False)
with open(ss.name, 'w') as f:
f.write(ss_cons)
if not RCHIE_PATH:
raise RChieError('RChie path not set up!')
cmd = RCHIE_PATH + \
"rchie.R --msafile='%s' --format1 vienna '%s' --output '%s'" % (
fasta_alignment.name, ss.name, plot.name)
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = o.stdout.read().strip()
err = o.stderr.read().strip()
# *****PROCESSING MSA FILE*****
# Error in readFasta(opt$msafile, filter = TRUE) : no FASTA sequences found
# Error: ERROR: Invalid FASTA file
# Execution halted
if "error" in str(err).lower():
raise Exception('\n'.join([cmd, err]))
if verbose:
print('\n'.join([cmd, err]))
self.plotfn = plot.name
if verbose:
print(self.plotfn)
if plot_fn:
shutil.move(plot.name, plot_fn)
print('Rchie: plot saved to %s' % plot_fn)
from IPython.display import Image
return Image(filename=plot_fn)
def show(self):
from IPython.display import Image
return Image(filename=self.plotfn)
def write(self, outfn):
shutil.copyfile(self.plotfn, outfn)
print('Write to %s' % outfn)
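# Minimal usage sketch (not part of the original module; mirrors RNAalignment.plot()
# below): assumes RCHIE_PATH is configured and `a` is an RNAalignment instance.
#   RChie().plot_cov(a.io.format("fasta"), a.ss_cons_std, plot_fn="rchie.png")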
class RNASeq(object):
"""RNASeq.
Args:
id (str) : id of a sequence
seq (str) : seq, it be uppercased.
ss (str) : secondary structure, default None
Attributes:
seq_no_gaps(str) : seq.replace('-', '')
ss_no_gaps(str) : ss.replace('-', '')
"""
def __init__(self, id, seq, ss=None):
self.id = id
self.seq = seq.upper()
# self.ss_raw = ss # this will not be changed after remove_gaps.
# so maybe don't use ss_raw at call
self.ss = ss
self.ss = self.get_ss_std()
self.seq_no_gaps = seq.replace('-', '')
self.ss_no_gaps = ss.replace('-', '')
#@property
def get_ss_std(self):
nss = ''
for s in self.ss:
nss += get_rfam_ss_notat_to_dot_bracket_notat(s)
return nss
def __repr__(self):
return self.id
def __len__(self):
return len(self.seq)
def __getitem__(self, i):
if self.ss:
return RNASeq(self.id + '_slice', self.seq[i], self.ss[i])
else:
return RNASeq(self.id + '_slice', self.seq[i])
def remove_columns(self, to_remove):
"""indexing from 0"""
nseq = ''
for i, s in enumerate(self.seq):
if i not in to_remove:
nseq += s
nss = ''
if self.ss:
for i, s in enumerate(self.ss):
if i not in to_remove:
nss += s
self.seq = nseq
self.ss = nss
def draw_ss(self, title='', verbose=False, resolution=1.5):
"""Draw secondary structure of RNA with VARNA.
VARNA: Visualization Applet for RNA
A Java lightweight component and applet for drawing the RNA secondary structure
.. image :: ../pngs/varna.png
Cite: VARNA: Interactive drawing and editing of the RNA secondary structure Kevin Darty, Alain Denise and Yann Ponty Bioinformatics, pp. 1974-197,, Vol. 25, no. 15, 2009
http://varna.lri.fr/"""
drawfn = tempfile.NamedTemporaryFile(delete=False).name + '.png'
SecondaryStructure.draw_ss(title, self.seq, self.ss, drawfn, resolution, verbose=verbose)
from IPython.display import Image
return Image(filename=drawfn)
def remove_gaps(self, check_bps=True, only_canonical=True, allow_gu=True):
"""Remove gaps from seq and secondary structure of the seq.
Args:
            check_bps (bool) : fix mistakes such as a residue paired with a gap (see below)
only_canonical (bool) : keep in ss only pairs GC, AU
allow_gu (bool) : keep in ss also GU pair
.. image :: ../pngs/ss_misgap.png
A residue "paired" with a gap.
.. image :: ../pngs/ss_misgap_wrong.png
        If check_bps (the default), then when removing gaps the function checks whether the gap is
        paired with any residue (in the blue circle). If yes, that residue is unpaired (in this case ``)`` -> ``.``).
.. image :: ../pngs/ss_misgap_ok.png
if ``only_canonical`` (by default) is True then only GC, AU can be paired.
.. image :: ../pngs/ss_only_canonical.png
        If ``allow_gu`` is True (the default) then the GU pair is also kept.
.. image :: ../pngs/ss_no_gu.png
If you provide seq and secondary structure such as::
GgCcGGggG.GcggG.cc.u.aAUACAAuACCC.GaAA.GGGGAAUAaggCc.gGCc.gu......CU.......uugugcgGUuUUcaAgCccCCgGcCaCCcuuuu
(((((((((....((.((............(((......)))......))))..(((.(.....................)))).......)))))))))........
gaps will be remove as well.
"""
GAPS = ['-', '.']
nseq = ''
nss = ''
for (nt, nt_ss) in zip(self.seq, self.ss):
if nt in GAPS and nt_ss in GAPS:
pass
else:
nseq += nt
nss += nt_ss
self.seq = nseq
self.ss = nss
nss = list()
bps = self.ss_to_bps()
if check_bps:
nss = list(self.ss)
for bp in bps:
nt_left = self.seq[bp[0]]
nt_right = self.seq[bp[1]]
if nt_left == '-' or nt_right == '-':
nss[bp[0]] = '.'
nss[bp[1]] = '.'
self.ss = ''.join(nss)
if only_canonical:
nseq = list(self.seq)
nss = list(self.ss)
for bp in bps:
nt_left = nseq[bp[0]]
nt_right = nseq[bp[1]]
if (nt_left == 'A' and nt_right == 'U') or (nt_left == 'U' and nt_right == 'A'):
pass
elif (nt_left == 'G' and nt_right == 'C') or (nt_left == 'C' and nt_right == 'G'):
pass
elif (nt_left == 'G' and nt_right == 'U') or (nt_left == 'U' and nt_right == 'G'):
if allow_gu:
pass
else:
nss[bp[0]] = '.'
nss[bp[1]] = '.'
else:
nss[bp[0]] = '.'
nss[bp[1]] = '.'
self.ss = ''.join(nss)
# two?????? what is "two"?
nss = []
nseq = ''
for i, (c, s) in enumerate(zip(self.seq, self.ss)):
if c != '-':
nseq += c
nss.append(s)
self.seq = nseq
self.ss = ''.join(nss)
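    # Minimal example sketch (not part of the original module): shared gap columns
    # are dropped and non-canonical pairs are opened.
    #   rs = RNASeq("demo", "GG-CAA-CC", "((-...-))")
    #   rs.remove_gaps()   # expected: rs.seq == "GGCAACC", rs.ss == "((...))"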
def ss_to_bps(self):
"""Convert secondary structure into a list of basepairs.
Returns:
bps (list): a list of base pairs, e.g. [[0, 80], [1, 79], [2, 78], [4, 77], [6, 75], [7, 74], ...]
"""
j = []
bps = []
pair_types = ['()', '[]', '<>', '{}']
for pair_type in pair_types:
for i, s in enumerate(self.ss):
if s == pair_type[0]:
j.append(i)
if s == pair_type[1]:
bps.append([j.pop(), i])
if len(j):
# if something left, this is a problem (!!!)
raise Exception('Mis-paired secondary structure')
bps.sort()
return bps
def get_conserved(self, consensus, start=0, to_pymol=True, offset=0):
"""Start
UCGGGGUGCCCUUCUGCGUG--------------------------------------------------AAGGC-UGAGAAAUACCCGU-------------------------------------------------AUCACCUG-AUCUGGAU-AAUGC
XXXXXXXXXXXXGXGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX----------------------------XXXXX-XCUGAGAXXXXXXXXXXXXXXXXXXXXXX----------------------------------XXXXXXXX-XXXXXXXX-ACXUG
"""
c = start + offset
index = []
print(self.seq)
print(consensus)
for nt_seq, nt_consensus in zip(self.seq, consensus):
if nt_consensus in ['-', 'X']:
pass
else:
index.append(c)
if nt_seq != '-':
c += 1
if to_pymol:
return("color red, " + str(index).replace(', ', '+').replace('[','').replace(']',''))
else:
return index
def get_distance_to(self, nseq):
"""Get distance of self.seq to nseq."""
return round(Levenshtein.ratio(self.seq, nseq), 2)
class RNAalignment(object):
"""RNA alignment - adapter class around BioPython to do RNA alignment stuff
Usage (for more see IPython notebook https://github.com/mmagnus/rna-tools/blob/master/rna_tools/tools/rna_alignment/rna_alignment.ipynb)
>>> a = RNAalignment('test_data/RF00167.stockholm.sto')
>>> print(a.tail())
>>> print(a.ss_cons)
Args:
fn (str): Filename
io (Bio.AlignIO): AlignIO.read(fn, "stockholm")
lines (list): List of all lines of fn
seqs (list): List of all sequences as class:`RNASeq` objects
rf (str): ReFerence annotation, the consensus RNA sequence
Read more:
- http://biopython.org/DIST/docs/api/Bio.AlignIO.StockholmIO-module.html
and on the format itself
- https://en.wikipedia.org/wiki/Stockholm_format
- http://sonnhammer.sbc.su.se/Stockholm.html
.. warning:: fetch requires urllib3
"""
def __init__(self, fn='', fetch=''):
if fetch:
import urllib3
http = urllib3.PoolManager()
response = http.request('GET', 'http://rfam.xfam.org/family/' +
fetch + '/alignment/stockholm?gzip=1&download=1')
if not response.status == 200:
raise RFAMFetchError(
"The alignment could not be downloaded. Please check the RFAM id that you requested! (don't put .stk etc in the id)")
with open(fetch + '.stk.gz', 'wb') as f:
f.write(response.data)
with gzip.open(fetch + '.stk.gz', 'rb') as f:
file_content = f.read()
with open(fetch + '.stk', 'wb') as f:
f.write(file_content)
fn = fetch + '.stk'
self.fn = fn
self.lines = open(fn).read().split('\n')
self.io = AlignIO.read(fn, "stockholm")
self.ss_cons = self.get_ss_cons()
self.ss_cons_pk = self.get_ss_cons_pk()
self.copy_ss_cons_to_all()
self._ss_cons_std = self.ss_cons
self.rf = self.get_gc_rf()
self.shift = self.get_shift_seq_in_align()
# get all lines not # nor //
# fix for blocked alignment
seq_lines = [l for l in self.lines if (
not l.startswith('#')) and (not l.startswith('//')) and (l)]
seqs_dict = OrderedDict()
for seq in seq_lines:
seq_id, seq_seq = seq.split()
if seq_id not in seqs_dict:
seqs_dict[seq_id] = seq_seq
else:
seqs_dict[seq_id] += seq_seq
self.seqs = []
for seq in seqs_dict:
self.seqs.append(RNASeq(seq, seqs_dict[seq], ss=self.ss_cons_with_pk_std))
# this is sick!
# I create a Cols object to be able to slice alignments
class Cols:
def __init__(self, alignment):
self.alignment = alignment
def __getitem__(self, i):
"""Return new alignment"""
if type(i) is list:
pass
else:
# collect "new" sequences
n_seqs = []
for s in self.alignment:
new_seq = RNASeq(s.id, s.seq[i], s.ss[i])
n_seqs.append(new_seq)
# this is not very smart :(
# save new seqs to a file
# and load it as RNAalignment
tf = tempfile.NamedTemporaryFile(delete=False)
tf.name += '.stk'
with open(tf.name, 'w') as f:
f.write('# STOCKHOLM 1.0\n')
for s in n_seqs:
f.write(' '.join([s.id, s.seq, '\n']))
# add ss_cons & //
f.write('#=GC SS_cons ' + self.alignment.ss_cons[i] + '\n')
if self.alignment.ss_cons_pk:
f.write('#=GC SS_cons_pk' + self.alignment.ss_cons_pk[i] + '\n')
f.write('#=GC RF ' + self.alignment.rf[i] + '\n')
f.write('//\n')
return RNAalignment(tf.name)
self.cols = Cols(self)
# ^^^^ sick ^^^^^^^^^^^
def reload_alignment(self):
tmpfn = tempfile.NamedTemporaryFile(delete=False).name
self.write(tmpfn)
self.io = AlignIO.read(tmpfn, "stockholm")
def __len__(self):
"""Return length of all sequenes."""
return len(self.seqs)
def __getitem__(self, i):
if type(i) is str:
for s in self:
if s.id == i:
return s
elif type(i) is list:
seqs = []
for j in i:
seqs.append(self.seqs[j])
return seqs
else:
return self.seqs[i]
@property
def ss_cons_std(self):
return get_rfam_ss_notat_to_dot_bracket_notat(self.ss_cons)
@ss_cons_std.setter
def ss_cons_std(self, ss):
self._ss_cons_std = ss
print(self._ss_cons_std)
def subset(self, ids, verbose=False):
"""Get subset for ids::
# STOCKHOLM 1.0
#=GF WK Tetrahydrofolate_riboswitch
..
AAQK01002704.1/947-1059 -U-GC-AAAAUAGGUUUCCAUGC..
#=GC SS_cons .(.((.((----((((((((((...
#=GC RF .g.gc.aGAGUAGggugccgugc..
//
"""
nalign = ''
for l in self.lines:
if l.startswith('//'):
nalign += l + '\n'
if l.startswith('#'):
nalign += l + '\n'
else:
for i in ids:
if l.startswith(i):
nalign += l + '\n'
tf = tempfile.NamedTemporaryFile(delete=False)
tf.name += '.stk'
print('Saved to ', tf.name)
if verbose:
print(nalign)
f = open(tf.name, 'w')
f.write(nalign)
#f.write(self.rf + '\n')
f.close()
return RNAalignment(tf.name)
def __add__(self, rna_seq):
self.seqs.append(rna_seq)
self.reload_alignment()
def write(self, fn, verbose=False):
"""Write the alignment to a file"""
if verbose:
print('Save to ', fn)
with open(fn, 'w') as f:
f.write('# STOCKHOLM 1.0\n')
shift = max([len(x) for x in [s.id for s in self.seqs] + ['#=GC=SS_cons']])
for s in self:
f.write(s.id.ljust(shift + 2, ' ') + s.seq + '\n')
f.write('#=GC SS_cons'.ljust(shift + 2, ' ') + self.ss_cons + '\n')
f.write('//')
def copy_ss_cons_to_all(self, verbose=False):
for s in self.io:
if verbose:
self.ss_cons
self.io[0].seq
try:
s.letter_annotations['secondary_structure'] = self.ss_cons
except TypeError:
raise Exception(
'Please check if all your sequences and ss lines are of the same length!')
s.ss = self.ss_cons
s.ss_clean = self.get_clean_ss(s.ss)
s.seq_nogaps = str(s.seq).replace('-', '')
s.ss_nogaps = self.get_ss_remove_gaps(s.seq, s.ss_clean)
def copy_ss_cons_to_all_editing_sequence(self, seq_id, before, after):
"""Change a sequence's sec structure.
:param seq_id: string, sequence id to change, eg: ``AE009948.1/1094322-1094400``
:param before: string, character to change from, eg: ``,``
:param after: string, character to change to, eg: ``.``
.. warning:: before and after has to be one character long
"""
for s in self.io:
if s.id == seq_id:
s.letter_annotations['secondary_structure'] = self.ss_cons.replace(before, after)
else:
s.letter_annotations['secondary_structure'] = self.ss_cons
def get_ss_remove_gaps(self, seq, ss):
"""
:param seq: string, sequence
:param ss: string, ss
UAU-AACAUAUAAUUUUGACAAUAUGG-GUCAUAA-GUUUCUACCGGAAUACC--GUAAAUAUUCU---GACUAUG-UAUA-
(((.(.((((,,,(((((((_______.))))))).,,,,,,,,(((((((__.._____))))))...),,)))).)))).
"""
nss = ''
for i, j in zip(seq, ss):
if i != '-':
nss += j
return nss
def plot(self, plot_fn='rchie.png'):
return RChie().plot_cov(self.io.format("fasta"), self.ss_cons_std, plot_fn)
def get_ss_cons_pk(self):
"""
        :return: SS_cons_pk line, or an empty string if there is no SS_cons_pk."""
ss_cons_pk = ''
for l in self.lines:
if l.startswith('#=GC SS_cons_pk'):
ss_cons_pk += l.replace('#=GC SS_cons_pk', '').strip()
return ss_cons_pk
def get_ss_cons(self):
"""
        :return: SS_cons line, or an empty string if there is no SS_cons.
"""
ss_cons = ''
for l in self.lines:
if '#=GC SS_cons' in l and '#=GC SS_cons_pk' not in l:
ss_cons += l.replace('#=GC SS_cons', '').strip()
return ss_cons
@property
def ss_cons_with_pk(self):
"""go over ss_cons and overwrite bp is there is pk (ss_cons_pk)
ss_cons: (((.(.((((,,,(((((((_______.))))))).,,,,,,,,(((((((__.._____))))))...),,)))).)))).
ss_cons_pk: .......................[[...............................]]........................
ss_cons_with_pk: (((.(.((((,,,(((((((___[[__.))))))).,,,,,,,,(((((((__.._]]__))))))...),,)))).)))).
"return ss_cons_with_pk: string, e.g. (((.(.((((,,,(((((((___[[__.))))"""
if self.ss_cons_pk:
ss_cons_with_pk = ''
for i, (s, p) in enumerate(zip(self.ss_cons, self.ss_cons_pk)):
if p != '.':
ss_cons_with_pk += p
else:
ss_cons_with_pk += s
return ss_cons_with_pk
else:
return self.ss_cons
@property
def ss_cons_with_pk_std(self):
if self.ss_cons_pk:
ss_cons_with_pk_std = ''
for i, (s, p) in enumerate(zip(get_rfam_ss_notat_to_dot_bracket_notat(self.ss_cons), self.ss_cons_pk)):
if p != '.':
ss_cons_with_pk_std += p
else:
ss_cons_with_pk_std += s
return ss_cons_with_pk_std
else:
return self.ss_cons
def get_gc_rf(self):
"""Return (str) ``#=GC RF`` or '' if this line is not in the alignment.
"""
for l in self.lines:
if l.startswith('#=GC RF'):
return l.replace('#=GC RF', '').replace('_cons', '').strip()
else:
return ''
# raise RNAalignmentError('There is on #=GC RF in the alignment!')
def get_shift_seq_in_align(self):
"""RF_cons vs '#=GC RF' ???"""
for l in self.lines:
if l.startswith('#=GC RF'):
# #=GC RF .g.gc.a
l = l.replace('#=GC RF', '')
c = 7 # 12 # len of '#=GC RF'
# .g.gc.a
for i in l:
if i == ' ':
c += 1
self.shift = c
return c
def map_seq_on_seq(self, seq_id, seq_id_target, resis, v=True):
"""
:param seq_id: seq_id, 'AAML04000013.1/228868-228953'
:param seq_id_target: seq_id of target, 'CP000721.1/2204691-2204778'
:param resis: list resis, [5,6]
map::
[4, 5, 6]
UAU-A
UAU-AA
UAU-AAC
[5, 6, 7]
CAC-U
CAC-U-
CAC-U-U
[4, None, 5]
"""
# get number for first seq
print(resis)
nresis = []
for s in self.io:
if s.id.strip() == seq_id.strip():
for i in resis:
print(s.seq[:i + 1]) # UAU-A
nresis.append(i + s.seq[:i].count('-'))
print(nresis)
print(self.map_seq_on_align(seq_id_target, nresis))
return
resis_target = []
for s in self.io:
if s.id.strip() == seq_id_target.strip():
for i in nresis:
if v:
print(s.seq[:i])
if s.seq[i - 1] == '-':
resis_target.append(None)
else:
resis_target.append(i - s.seq[:i].count('-'))
return resis_target
def map_seq_on_align(self, seq_id, resis, v=True):
"""
:param seqid: seq_id, 'CP000721.1/2204691-2204775'
:param resis: list resis, [5,6]
maps::
[5, 6, 8]
CAC-U
CAC-U-
CAC-U-UA
[4, None, 6]
"""
if v:
print(resis)
nresis = []
for s in self.io:
if s.id.strip() == seq_id.strip():
for i in resis:
if v:
print(s.seq[:i])
if s.seq[i - 1] == '-':
nresis.append(None)
else:
nresis.append(i - s.seq[:i].count('-'))
return nresis
def head(self):
return '\n'.join(self.lines[:5])
def tail(self):
return '\n'.join(self.lines[-5:])
def describe(self):
"""Describe the alignment.
> print(a.describe())
SingleLetterAlphabet() alignment with 13 rows and 82 columns
"""
return str(self.io).split('\n')[0]
def remove_empty_columns(self, verbose=False):
"""Remove empty columns in place.
Example::
>>> a = RNAalignment("test_data/zmp.stk")
>>> print(a)
SingleLetterAlphabet() alignment with 6 rows and 319 columns
---ACCUUGCGCGACUGGCGAAUCC-------------------...AAU CP001644.1/756294-756165
--GCUCUCGCGCGACUGGCGACUUUG------------------...GAA CU234118.1/352539-352459
UGAGUUUUCUGCGACUGACGGAUUAU------------------...CUG BAAV01000055.1/2897-2982
GCCCGUUCGCGUGACUGGCGCUAGU-------------------...CGA CP000927.1/5164264-5164343
-----GGGUCGUGACUGGCGAACA--------------------...--- zmp
UCACCCCUGCGUGACUGGCGAUA---------------------...GUU AP009385.1/718103-718202
>>> a.remove_empty_columns()
>>> print(a)
SingleLetterAlphabet() alignment with 6 rows and 138 columns
---ACCUUGCGCGACUGGCGAAUCC-UGAAGCUGCUUUG-AGCG...AAU CP001644.1/756294-756165
--GCUCUCGCGCGACUGGCGACUUUG------------------...GAA CU234118.1/352539-352459
UGAGUUUUCUGCGACUGACGGAUUAU------------------...CUG BAAV01000055.1/2897-2982
GCCCGUUCGCGUGACUGGCGCUAGU-------------------...CGA CP000927.1/5164264-5164343
-----GGGUCGUGACUGGCGAACA--------G-----------...--- zmp
UCACCCCUGCGUGACUGGCGAUA--------GAACCCUCGGGUU...GUU AP009385.1/718103-718202
        Goes over all sequences and modifies self.ss_cons accordingly."""
cols_to_rm = []
# get only seqs
for i in range(len(self[0])):
gap = True
for seq in self:
if seq.seq[i] != '-':
gap = False
if gap:
cols_to_rm.append(i)
# remove from sequences
for s in self:
s.remove_columns(cols_to_rm)
# update io # hack #
tmpfn = tempfile.NamedTemporaryFile(delete=False).name
self.write(tmpfn, verbose=False)
self.io = AlignIO.read(tmpfn, "stockholm")
# nss_cons update
nss_cons = ''
for i, s in enumerate(self.ss_cons):
if i not in cols_to_rm:
nss_cons += s
self.ss_cons = nss_cons
def format_annotation(self, t):
return self.shift * ' ' + t
def find_core(self, ids=None):
"""Find common core for ids.
.. image:: ../pngs/find_core.png
Fig. By core, we understand columns that have all homologous residues. The core is here marked by `x`.
:param id: list, ids of seq in the alignment to use
"""
if not ids:
ids = []
for s in self.io:
ids.append(s.id)
xx = list(range(0, len(self.io[0])))
for i in range(0, len(self.io[0])): # if . don't use it
for s in self.io:
# print s.id
if s.id in ids:
if s.seq[i] == '-':
xx[i] = '-'
break
else:
xx[i] = 'x'
return ''.join(xx)
shift = self.get_shift_seq_in_align()
fnlist = open(self.fn).read().strip().split('\n')
fnlist.insert(-2, 'x' + ' ' * (shift - 1) + ''.join(xx))
# print fnlist
for l in fnlist:
print(l)
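    # A toy illustration of find_core() (not from the original file): for two
    # aligned sequences
    #     seq1: ACG-U
    #     seq2: AC--U
    # the returned string is 'xx--x' -- 'x' marks columns where every selected
    # sequence has a residue, '-' marks columns containing at least one gap.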
def find_seq(self, seq, verbose=False):
"""Find seq (also subsequences) and reverse in the alignment.
Args:
seq (str): seq is upper()
verbose (bool): be verbose
::
seq = "ggaucgcugaacccgaaaggggcgggggacccagaaauggggcgaaucucuuccgaaaggaagaguaggguuacuccuucgacccgagcccgucagcuaaccucgcaagcguccgaaggagaauc"
hit = a.find_seq(seq, verbose=False)
ggaucgcugaacccgaaaggggcgggggacccagaaauggggcgaaucucuuccgaaaggaagaguaggguuacuccuucgacccgagcccgucagcuaaccucgcaagcguccgaaggagaauc
Match: AL939120.1/174742-174619
ID: AL939120.1/174742-174619
Name: AL939120.1
Description: AL939120.1/174742-174619
Number of features: 0
/start=174742
/end=174619
/accession=AL939120.1
Per letter annotation for: secondary_structure
Seq('CCAGGUAAGUCGCC-G-C--ACCG---------------GUCA-----------...GGA', SingleLetterAlphabet())
GGAUCGCUGAACCCGAAAGGGGCGGGGGACCCAGAAAUGGGGCGAAUCUCUUCCGAAAGGAAGAGUAGGGUUACUCCUUCGACCCGAGCCCGUCAGCUAACCUCGCAAGCGUCCGAAGGAGAAUC
"""
seq = seq.replace('-', '').upper()
for s in self.io:
seq_str = str(s.seq).replace('-', '').upper()
if verbose:
print(seq_str)
if seq_str.find(seq) > -1 or seq.find(seq_str) > -1:
print('Match:', s.id)
print(s)
print(seq)
return s
print('Not found')
def find_seq_exact(self, seq, verbose=False):
"""Find seq (also subsequences) and reverse in the alignment.
:param seq: string, seq, seq is upper()
:param verbose: boolean, be verbose or not
"""
seq = seq.replace('-', '').upper()
for s in self.io:
seq_str = str(s.seq).replace('-', '').upper()
if verbose:
print(seq_str)
if seq_str == seq:
print('Match:', s.id)
print(s)
print(seq)
return s
print('Not found')
def get_clean_ss(self, ss):
nss = ''
for s in ss:
nss += get_rfam_ss_notat_to_dot_bracket_notat(s)
return nss
    def get_seq_ss(self, seq_id):
        """Return (seq, ss) for seq_id with gap columns removed and ss standardized."""
        record = self.get_seq(seq_id)
        seq = record.seq
        ss = record.letter_annotations['secondary_structure']
        nseq = ''
        nss = ''
        for i, j in zip(seq, ss):
            if i != '-':  # skip gap columns
                nseq += i
                nss += get_rfam_ss_notat_to_dot_bracket_notat(j)
        return nseq.strip(), nss.strip()
def get_seq(self, seq_id):
for s in self.io:
if s.id == seq_id:
return s
raise Exception('Seq not found')
def get_seq_with_name(self, seq_name):
for s in self.io:
if s.name == seq_name:
return s
raise Exception('Seq not found')
def align_seq(self, seq):
"""Align seq to the alignment.
Using self.rf.
Args:
seq (str): sequence, e.g. ``-GGAGAGUA-GAUGAUUCGCGUUAAGUGUGUGUGA-AUGGGAUGUC...``
Returns:
            str: seq that can be inserted into the alignment, ``.-.GG.AGAGUA-GAUGAUUCGCGUUA`` ! . -> -
"""
seq = list(seq)
seq.reverse()
nseq = ''
        for n in self.rf:  # n = nucleotide (column) of the RF line
if n != '.':
try:
j = seq.pop()
except:
j = '.'
nseq += j
if n == '.':
nseq += '.' # + j
return nseq.replace('.', '-')
def __repr__(self):
return (str(self.io))
def trimmed_rf_and_ss(self):
"""Remove from RF and SS gaps.
Returns:
(str,str): trf, tss - new RF and SS
"""
trf = ''
tss = ''
for r, s in zip(self.rf, self.ss_cons_std):
if r not in ['-', '.']:
trf += r
tss += s
return trf, tss
def get_distances(self):
"""Get distances (seq identity) all-vs-all.
        Computed with Biopython's DistanceCalculator using the 'identity' model.
        Note: blastn reports ``Bad alphabet 'U' in sequence 'AE008922.1/409481-409568' at position '7'`` because it expects DNA, not RNA.
        Read more (also about the available matrices) at <http://biopython.org/wiki/Phylo> and
        HTTP://biopython.org/DIST/docs/api/Bio.Phylo.TreeConstruction.DistanceCalculator-class.html
"""
calculator = DistanceCalculator('identity')
dm = calculator.get_distance(self.io)
return dm
def get_the_closest_seq_to_ref_seq(self, verbose=False):
"""
Example::
>>> a = RNAalignment("test_data/RF02221.stockholm.sto")
>>> a.get_the_closest_seq_to_ref_seq()
AF421314.1/431-344
"""
self + RNASeq('ConSeq', self.rf, '')
dist = self.get_distances()
distConSeq = dist['ConSeq'][:-1] # to remove ending 0, bc of distance to itself
minimal = min(distConSeq)
index = dist['ConSeq'].index(minimal)
#id = dist.names[index]
if verbose:
print('dist:\n', str(dist))
print('distConSeq:', dist['ConSeq'])
return self[index]
class CMAlign():
"""CMAalign class around cmalign (of Inferal).
cmalign - aligns the RNA sequences in <seqfile> to the covariance model
(CM) in <cmfile>. The new alignment is output to stdout in Stockholm
format.
Example::
cma = ra.CMAlign()
cma.run_cmalign("ade_seq.fa", "RF00167.cm")
seq = cma.get_seq()
print 'cma hit ', seq
print 'seq ', a.align_seq(seq)
print 'a.rf ', a.rf
cmd cmalign -g RF00167.cm ade_seq.fa
# STOCKHOLM 1.0
#=GF AU Infernal 1.1.2
ade ----------------CGCUUCAUAUAAUCCUAAUGAUAUGGUUUGGGAGUUUCUACCAAGAG-CCUUAAA-CUCUUGAUUAUGAAGUGA------------
#=GR ade PP ................99*********************************************.*******.***************999............
#=GC SS_cons :::::::::::::::::((((((((,,,<<<<<<<_______>>>>>>>,,,,,,,,<<<<<<<_______>>>>>>>,,))))))))::::::::::::::
#=GC RF aaaaaauaaaaaaaauucccuCgUAUAAucccgggAAUAUGGcccgggaGUUUCUACCaggcagCCGUAAAcugccuGACUAcGagggaaauuuuuuuuuuu
//
cma hit ----------------CGCUUCAUAUAAUCCUAAUGAUAUGGUUUGGGAGUUUCUACCAAGAG-CCUUAAA-CUCUUGAUUAUGAAGUGA------------
seq ----------------CGCU-U-CAUAUAAUCCUAAUGAUAUGG-UUUGGGA-GUUUCUACCAAGAG-CC--UUAAA-CUCUU---GAUUAUG-AAGUGA-------------
a.rf aaaaaauaaaaaaaauuccc.u.CgUAUAAucccgggAAUAUGG.cccggga.GUUUCUACCaggcagCC..GUAAAcugccu...GACUAcG.agggaaauuuuuuuuuuu.
Install http://eddylab.org/infernal/
Cite: Nawrocki and S. R. Eddy, Infernal 1.1: 100-fold faster RNA homology searches, Bioinformatics 29:2933-2935 (2013). """
def __init__(self, outputfn=None):
"""Use run_cmalign or load cmalign output from a file"""
if outputfn:
self.output = open(outputfn).read().strip().split('\n')
def run_cmalign(self, seq, cm, verbose=True):
"""Run cmalign and process the result.
:param seq: seq string
:param cm: cm fn
Run::
$ cmalign RF01831.cm 4lvv.seq
# STOCKHOLM 1.0
#=GF AU Infernal 1.1.2
4lvv -GGAGAGUA-GAUGAUUCGCGUUAAGUGUGUGUGA-AUGGGAUGUCG-UCACACAACGAAGC---GAGA---GCGCGGUGAAUCAUU-GCAUCCGCUCCA
#=GR 4lvv PP .********.******************9999998.***********.8999999******8...5555...8**************.************
#=GC SS_cons (((((----(((((((((((,,,,,<<-<<<<<<<<___________>>>>>>>>>>,,,<<<<______>>>>,,,)))))))))))-------)))))
#=GC RF ggcaGAGUAGggugccgugcGUuAAGUGccggcgggAcGGGgaGUUGcccgccggACGAAgggcaaaauugcccGCGguacggcaccCGCAUcCgCugcc
//
.. warning :: requires cmalign to be set in your shell
"""
tf = tempfile.NamedTemporaryFile(delete=False)
tf.name += '.seq'
with open(tf.name, 'w') as f:
f.write('>target\n')
f.write(seq + '\n')
cmd = 'cmalign -g ' + cm + ' ' + tf.name # global
if verbose:
            print('cmd ' + cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = o.stdout.read().strip()
stderr = o.stderr.read().strip()
if verbose:
print(stdout)
self.output = stdout.split('\n')
def get_gc_rf(self):
"""Get ``#=GC RF``.
:var self.output: string
"""
for l in self.output:
if l.startswith('#=GC RF'):
return l.replace('#=GC RF', '').strip()
def get_seq(self):
"""
:var self.output: output of cmalign, string
"""
for l in self.output:
if l.strip():
if not l.startswith('#'):
# 4lvv -GGAGAGUA-GAUGAU
return l.split()[1].strip()
def clean_seq_and_ss(seq, ss):
nseq = ''
nss = ''
for i, j in zip(seq, ss):
if i != '-': # gap
# print i,j
nseq += i
nss += get_rfam_ss_notat_to_dot_bracket_notat_per_char(j)
return nseq.strip(), nss.strip()
def get_rfam_ss_notat_to_dot_bracket_notat_per_char(c):
"""Take (c)haracter and standardize ss (including pks in letter (AAaa) notation).
.. warning:: DD DD will be treated as BB BB (<< >>) (it might be wrong!)"""
if c in [',', '_', ':', '-']:
return '.'
if c == '<':
return '('
if c == '{':
return '('
if c == '}':
return ')'
if c == ']':
return ')'
if c == '[':
return '('
    if c == '>':
        return ')'
if c == 'A':
return '['
if c == 'a':
return ']'
if c == 'B':
return '<'
if c == 'b':
return '>'
if c == 'C':
return '{'
if c == 'c':
return '}'
# !!!!!!!!!!!!!!!!!!!!!!!!!!
if c == 'D':
return '<'
if c == 'd':
return '>'
# !!!!!!!!!!!!!!!!!!!!!!!!!!
return c
def get_rfam_ss_notat_to_dot_bracket_notat(ss):
"""Change all <<>> to "standard" dot bracket notation.
Works also with pseudknots AA BB CC etc."""
nss = ''
for s in ss:
ns = get_rfam_ss_notat_to_dot_bracket_notat_per_char(s)
nss += ns
return nss
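# A short usage sketch of the conversion above (not from the original file);
# the expected output follows directly from the per-character mapping:
#   >>> get_rfam_ss_notat_to_dot_bracket_notat('::<<<__>>>,,AAaa')
#   '..(((..)))..[[]]'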
def fasta2stokholm(fn):
"""Take a gapped fasta file and return an RNAalignment object.
A fasta file should look like this::
>AE009948.1/1094322-1094400
UAC-U-UAUUUAUGCUGAGGAU--UGG--CUUAGC-GUCUCUACAAGACA-CC--GU-AA-UGUCU---AACAAUA-AGUA-
...
>CP000721.1/2204691-2204778
CAC-U-UAUAUAAAUCUGAUAAUAAGG-GUCGGAU-GUUUCUACCAAGCUACC--GUAAAUUGCUAUAGGACUAUA-AGUG-
>SS_cons
(((.(.((((...(((((((........))))))).........(((((((.........))))))...)..)))).)))).
>SS_cons_pk
.........................[[........................]].............................
    ``SS_cons_pk`` is optional; it is an extra line used to define a pseudoknot. You
    can also define a second pseudoknot as ``<<<...>>>`` and a third one with ``{{{ }}}``.
:param fn: file
:return: RNAalignment object
"""
seqs = []
s = None
for l in open(fn):
if l.startswith('>'):
if s:
seqs.append(s)
s = RNASeq(l.replace('>', '').strip(), '')
else:
s.seq += l.strip()
txt = ''
for l in open(fn):
if l.startswith('>'):
id = '\n' + l.replace('>', '\n').strip() + ' '
id = re.sub('ss_cons_pk', '#=GC SS_cons_pk', id, flags=re.IGNORECASE)
id = re.sub('ss_cons', '#=GC SS_cons', id, flags=re.IGNORECASE)
txt += id
else:
txt += l.strip()
txt = txt.strip() + '\n' # clean upfront \n and add tailing \n
tf = tempfile.NamedTemporaryFile(delete=False)
tf.name += '.stk'
with open(tf.name, 'w') as f:
f.write('# STOCKHOLM 1.0\n')
f.write(txt)
f.write('//\n')
return RNAalignment(tf.name)
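# A usage sketch for fasta2stokholm() (mirrors the commented example in the
# __main__ block below; the path is illustrative):
#   a = fasta2stokholm('test_output/ade_gapped.fa')
#   print(a.ss_cons)  # SS_cons parsed from the '>SS_cons' entry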
def fetch_stokholm(rfam_acc, dpath=None):
"""Fetch Stokholm file from Rfam.
:param rfam_acc: str, Rfam accession number, eg. RF00028
:param dpath: str or None, if None saves to current location, otherwise save to dpath folder
.. warning :: urllib3 is required, pip install urllib3
"""
import urllib3
http = urllib3.PoolManager()
# try:
print(dpath)
response = http.request('GET', url='http://rfam.xfam.org/family/' + rfam_acc.lower() +
'/alignment?acc=' + rfam_acc.lower() + '&format=stockholm&download=1')
# except urllib3.HTTPError:
# raise Exception('The PDB does not exists: ' + pdb_id)
txt = response.data
if dpath is None:
npath = rfam_acc + '.stk'
else:
npath = dpath + os.sep + rfam_acc + '.stk'
print('downloading...' + npath)
with open(npath, 'wb') as f:
f.write(txt)
print('ok')
return rfam_acc + '.stk'
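# A usage sketch (requires network access; the accession is the one used in the
# tests below):
#   fn = fetch_stokholm('RF00167')
#   a = RNAalignment(fn)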
# def test_seq_multilines_alignment()
def test_alignment_with_pk():
a = RNAalignment('test_data/RF00167.stockholm.sto')
print(a.tail())
print(a.ss_cons)
print(a.ss_cons_pk)
print(a.ss_cons_with_pk)
print(a[[1, 2]])
# main
if __name__ == '__main__':
## a = RNAalignment('test_data/RF00167.stockholm.sto')
# print a.get_shift_seq_in_align()
# print a.map_seq_on_align('CP000721.1/2204691-2204778', [5,6,8])
# print a.map_seq_on_seq('AAML04000013.1/228868-228953', 'CP000721.1/2204691-2204778', [4,5,6])
# print(record.letter_annotations['secondary_structure'])
## seq = a.get_seq('AAML04000013.1/228868-228953')
# print seq.seq
# print seq.ss
# print a.ss_cons
# print 'x'
# for s in a.io:
# print s.seq
# print s.ss_clean #letter_annotations['secondary_structure']
# for s in a.io:
# print s.seq_nogaps
# print s.ss_nogaps
# a.write('test_output/out.sto')
# a = RNAalignment("/home/magnus/work/rna-evo/rp12/seq/RF00379.stockholm.stk")##_rm85.stk')
# print a.get_seq_ss("AL939120.1/174742-174619")
# subset = a.subset(["AL939120.1/174742-174619",
# "rp12bsubtilis",
# "AAWL01000001.1/57092-56953",
# "CP000612.1/87012-87130",
# "BA000028.3/1706087-1706245"])
# % cat /var/folders/yc/ssr9692s5fzf7k165grnhpk80000gp/T/tmpTzPenx
# for s in subset:
# print s.seq
# subset.remove_empty_columns()
# subset.write('/home/magnus/out2.stk')
# for s in subset:
# print s.seq
# print 'subset.ss_cons_std:', subset.ss_cons_std
# a = RNAalignment("/home/magnus/work/rna-evo/rp12/seq/RF00379.stockholm.stk")##_rm85.stk')
# a.ss_cons = "::::::::::{{{{,,,<<<_..."
# print a.ss_cons
# print a.ss_cons_std
# print get_rfam_ss_notat_to_dot_bracket_notat(subset.ss_cons)
## a.ss_cons_std = 'test of setter'
# print a._ss_cons_std
# pass
# slice
## slices = a.cols[0:10]
# for s in a:
# print s.seq
# add pk
# for s in slices:
# print s, s.seq, s.ss
#a = fasta2stokholm('test_output/ade_gapped.fa')
# print a
#a = RNAalignment('test_data/RF00001.blocked.stk')
# print a
# print a.ss_cons
# a.plot('rchie.png')
# a = RNAalignment("test_data/RF02221.stockholm.sto")
## a = RNAalignment('test_data/test_data/RF02221.stockholm.sto')
## a + RNASeq('ConSeq', '-A-GU-AGAGUA-GGUCUUAUACGUAA-----------------AGUG-UCAUCGGA-U-GGGGAGACUUCCGGUGAACGAA-G-G-----------------------------GUUA---------------------------CCGCGUUAUAUGAC-C-GCUUCCG-CUA-C-U-','')
## dist = a.get_distances()
# distConSeq = dist['ConSeq'][:-1] # to remove ending 0, bc of distance to itself
## minimal = min(distConSeq)
## index = dist['ConSeq'].index(minimal)
# print(dist.names[index])
a = RNAalignment("test_data/dist_test2.stk")
rep = a.get_the_closest_seq_to_ref_seq()
rep.remove_gaps()
print(rep)
print(rep.ss)
print(rep.seq)
# a.write('tmp.stk')
# s.remove_gaps()
# print(s.seq)
# print(s.ss)
import doctest
doctest.testmod()
| m4rx9/rna-pdb-tools | rna_tools/tools/rna_alignment/rna_alignment.py | Python | mit | 45,244 | [
"Biopython"
] | 28b7cbb9d31ed427759a6b5e1f435eb62da3fb2595483a1218f180e287d8aebf |
"""
Tests for xhr_handlers.py.
"""
import json
import os
import re
import StringIO
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.files.uploadedfile import UploadedFile
from django.core.urlresolvers import reverse
from django.http.request import HttpRequest
from django.test import Client
from django.test import TestCase
from main.models import AlignmentGroup
from main.models import Chromosome
from main.models import Dataset
from main.models import ExperimentSample
from main.models import Project
from main.models import ReferenceGenome
from main.models import Variant
from main.models import VariantSet
from main.models import VariantAlternate
from main.models import VariantCallerCommonData
from main.models import VariantEvidence
from main.testing_util import create_common_entities
from main.testing_util import TEST_EMAIL
from main.testing_util import TEST_PASSWORD
from main.testing_util import TEST_USERNAME
from main.xhr_handlers import create_ref_genome_from_browser_upload
from main.xhr_handlers import create_variant_set
from main.xhr_handlers import ref_genomes_concatenate
from main.xhr_handlers import samples_upload_through_browser_sample_data
from main.xhr_handlers import upload_single_sample
from main.xhr_handlers import VARIANT_LIST_REQUEST_KEY__FILTER_STRING
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__ERROR
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__LIST
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__TOTAL
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__TIME
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__SET_LIST
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__KEY_MAP
from pipeline.pipeline_runner import run_pipeline
from variants.dynamic_snp_filter_key_map import update_filter_key_map
from utils.import_util import _create_sample_and_placeholder_dataset
from utils.import_util import add_dataset_to_entity
from utils.import_util import import_reference_genome_from_local_file
from utils.import_util import SAMPLE_BROWSER_UPLOAD_KEY__READ_1
from utils.import_util import SAMPLE_BROWSER_UPLOAD_KEY__READ_2
from utils.import_util import SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME
from settings import PWD as GD_ROOT
from variants.melted_variant_schema import MELTED_SCHEMA_KEY__POSITION
TEST_DIR = os.path.join(GD_ROOT, 'test_data', 'genbank_aligned')
TEST_ANNOTATED_VCF = os.path.join(TEST_DIR, 'bwa_align_annotated.vcf')
TEST_MG1655_GENBANK = os.path.join(TEST_DIR, 'mg1655_tolC_through_zupT.gb')
STATUS_CODE__NOT_FOUND = 404
STATUS_CODE__NOT_LOGGED_IN = 302
STATUS_CODE__SUCCESS = 200
TEST_FQ_DIR = os.path.join(
GD_ROOT,
'test_data',
'fake_genome_and_reads',
'9b19e708')
TEST_FQ1_FILE = os.path.join(TEST_FQ_DIR,
'test_genome_2.snps.simLibrary.1.fq')
TEST_FQ2_FILE = os.path.join(TEST_FQ_DIR,
'test_genome_2.snps.simLibrary.2.fq')
TEST_FA_DIR = os.path.join(
GD_ROOT,
'test_data',
'genome_finish_test')
TEST_FASTA_1_PATH = os.path.join(TEST_FA_DIR, 'random_fasta_1.fa')
TEST_FASTA_2_PATH = os.path.join(TEST_FA_DIR, 'random_fasta_2.fa')
TEST_2_CHROM_FASTA_PATH = os.path.join(GD_ROOT, 'test_data', 'two_chromosome.fa')
LONG_ID_GENBANK = os.path.join(
GD_ROOT,
'test_data',
'long_id_genbank.gb')
TEST_GENBANK = os.path.join(
GD_ROOT,
'test_data',
'test_genbank.gb')
TEST_DIRTY_FQ_1 = os.path.join(
GD_ROOT,
'test_data',
'dirty_genbank_reads.1.fq')
TEST_DIRTY_FQ_2 = os.path.join(
GD_ROOT,
'test_data',
'dirty_genbank_reads.2.fq')
class TestGetVariantList(TestCase):
url = reverse('main.xhr_handlers.get_variant_list')
def setUp(self):
# Useful models.
user = User.objects.create_user(TEST_USERNAME, password=TEST_PASSWORD,
email=TEST_EMAIL)
self.project = Project.objects.create(owner=user.get_profile(),
title='Test Project')
self.ref_genome = ReferenceGenome.objects.create(project=self.project,
label='refgenome')
self.chromosome = Chromosome.objects.create(
reference_genome=self.ref_genome,
label='Chromosome',
num_bases=9001)
self.sample_obj_1 = ExperimentSample.objects.create(
project=self.project, label='fake sample')
# Make sure the reference genome has the required vcf keys.
update_filter_key_map(self.ref_genome, TEST_ANNOTATED_VCF)
self.vcf_dataset = Dataset.objects.create(
label='test_data_set',
type=Dataset.TYPE.VCF_FREEBAYES,
filesystem_location=TEST_ANNOTATED_VCF)
# Fake web browser client used to make requests.
self.client = Client()
self.client.login(username=TEST_USERNAME, password=TEST_PASSWORD)
def test__logged_out(self):
"""Test that logged out fails.
"""
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(STATUS_CODE__NOT_LOGGED_IN, response.status_code)
def test__missing_params(self):
response = self.client.get(self.url)
self.assertEqual(STATUS_CODE__NOT_FOUND, response.status_code)
def test__basic_function(self):
"""Basic test.
"""
alignment_group = AlignmentGroup.objects.create(
label='Alignment 1',
reference_genome=self.ref_genome,
aligner=AlignmentGroup.ALIGNER.BWA)
TOTAL_NUM_VARIANTS = 10
for pos in range(TOTAL_NUM_VARIANTS):
# We need all these models for testing because this is what the
# materialized view create requires to return non-null results.
variant = Variant.objects.create(
type=Variant.TYPE.TRANSITION,
reference_genome=self.ref_genome,
chromosome=self.chromosome,
position=pos,
ref_value='A')
VariantAlternate.objects.create(
variant=variant,
alt_value='G')
common_data_obj = VariantCallerCommonData.objects.create(
variant=variant,
source_dataset=self.vcf_dataset,
alignment_group=alignment_group)
VariantEvidence.objects.create(
experiment_sample=self.sample_obj_1,
variant_caller_common_data=common_data_obj)
# Sanity check that the Variants were actually created.
self.assertEqual(TOTAL_NUM_VARIANTS, Variant.objects.filter(
reference_genome=self.ref_genome).count())
request_data = {
'refGenomeUid': self.ref_genome.uid,
'projectUid': self.project.uid
}
response = self.client.get(self.url, request_data)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
response_data = json.loads(response.content)
# Make sure expected keys in response.
EXPECTED_RESPONSE_KEYS = set([
VARIANT_LIST_RESPONSE_KEY__LIST,
VARIANT_LIST_RESPONSE_KEY__TOTAL,
VARIANT_LIST_RESPONSE_KEY__SET_LIST,
VARIANT_LIST_RESPONSE_KEY__TIME,
VARIANT_LIST_RESPONSE_KEY__KEY_MAP,
])
self.assertEqual(EXPECTED_RESPONSE_KEYS, set(response_data.keys()),
"Missing keys %s\nGot keys %s" % (
str(EXPECTED_RESPONSE_KEYS -
set(response_data.keys())),
str(set(response_data.keys()))))
self.assertEqual(TOTAL_NUM_VARIANTS,
response_data[VARIANT_LIST_RESPONSE_KEY__TOTAL])
# Check total variants returned is correct.
variant_data_obj = json.loads(response_data[
VARIANT_LIST_RESPONSE_KEY__LIST])
variant_obj_list = variant_data_obj['obj_list']
        self.assertEqual(TOTAL_NUM_VARIANTS, len(variant_obj_list))
# Check positions are correct.
def _get_position_from_frontend_object(fe_obj):
return int(re.match('([0-9]+)', str(fe_obj[
MELTED_SCHEMA_KEY__POSITION])).group(1))
variant_position_set = set([_get_position_from_frontend_object(obj)
for obj in variant_obj_list])
self.assertEqual(set(range(TOTAL_NUM_VARIANTS)), variant_position_set)
def test_melted(self):
"""Test melted view.
"""
alignment_group = AlignmentGroup.objects.create(
label='Alignment 1',
reference_genome=self.ref_genome,
aligner=AlignmentGroup.ALIGNER.BWA)
TOTAL_NUM_VARIANTS = 10
for pos in range(TOTAL_NUM_VARIANTS):
# We need all these models for testing because this is what the
# materialized view create requires to return non-null results.
variant = Variant.objects.create(
type=Variant.TYPE.TRANSITION,
reference_genome=self.ref_genome,
chromosome=self.chromosome,
position=pos,
ref_value='A')
VariantAlternate.objects.create(
variant=variant,
alt_value='G')
common_data_obj = VariantCallerCommonData.objects.create(
variant=variant,
source_dataset=self.vcf_dataset,
alignment_group=alignment_group,
data={u'INFO_DP': 20, u'INFO_PQR': 0.0})
VariantEvidence.objects.create(
experiment_sample=self.sample_obj_1,
variant_caller_common_data=common_data_obj)
# Sanity check that the Variants were actually created.
self.assertEqual(TOTAL_NUM_VARIANTS, Variant.objects.filter(
reference_genome=self.ref_genome).count())
request_data = {
'refGenomeUid': self.ref_genome.uid,
'projectUid': self.project.uid,
'melt': '1'
}
response = self.client.get(self.url, request_data)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
response_data = json.loads(response.content)
# Make sure expected keys in response.
EXPECTED_RESPONSE_KEYS = set([
VARIANT_LIST_RESPONSE_KEY__LIST,
VARIANT_LIST_RESPONSE_KEY__TOTAL,
VARIANT_LIST_RESPONSE_KEY__SET_LIST,
VARIANT_LIST_RESPONSE_KEY__TIME,
VARIANT_LIST_RESPONSE_KEY__KEY_MAP,
])
self.assertEqual(EXPECTED_RESPONSE_KEYS, set(response_data.keys()),
"Missing keys %s\nGot keys %s" % (
str(EXPECTED_RESPONSE_KEYS -
set(response_data.keys())),
str(set(response_data.keys()))))
self.assertEqual(TOTAL_NUM_VARIANTS,
response_data[VARIANT_LIST_RESPONSE_KEY__TOTAL])
# Check total variants returned is correct.
variant_data_obj = json.loads(response_data[
VARIANT_LIST_RESPONSE_KEY__LIST])
variant_obj_list = variant_data_obj['obj_list']
        self.assertEqual(TOTAL_NUM_VARIANTS, len(variant_obj_list))
# Check positions are correct.
def _get_position_from_frontend_object(fe_obj):
return int(re.match('([0-9]+)', str(fe_obj[
MELTED_SCHEMA_KEY__POSITION])).group(1))
variant_position_set = set([_get_position_from_frontend_object(obj)
for obj in variant_obj_list])
self.assertEqual(set(range(TOTAL_NUM_VARIANTS)), variant_position_set)
def test_does_not_throw_500_on_server_error(self):
"""For user input errors, get_variant_list should not throw a 500 error.
This test might fail if the dev leaves the debugging clause
"except FakeException" in the code.
"""
request_data = {
'refGenomeUid': self.ref_genome.uid,
'projectUid': self.project.uid,
VARIANT_LIST_REQUEST_KEY__FILTER_STRING: 'nonesense'
}
response = self.client.get(self.url, request_data)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
response_data = json.loads(response.content)
self.assertTrue(VARIANT_LIST_RESPONSE_KEY__ERROR in response_data)
# Make sure FakeException is not imported
with self.assertRaises(ImportError):
# Don't leave FakeException as import.
from main.xhr_handlers import FakeException
class TestModifyVariantInSetMembership(TestCase):
"""Tests for the modify_variant_in_set_membership() xhr endpoint.
"""
def test_add__variants_specified(self):
"""Tests adding a specific list.
"""
# TODO: Implement.
pass
def test_add__all_matching_filter(self):
"""Test adding all matching filter.
"""
# TODO: Implement.
pass
class TestUploadSingleSample(TestCase):
def setUp(self):
"""Override.
"""
self.common_entities = create_common_entities()
def test_upload_single_sample(self):
project = self.common_entities['project']
request = HttpRequest()
request.POST = {
'projectUid': project.uid
}
request.method = 'POST'
request.user = self.common_entities['user']
authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
self.assertTrue(request.user.is_authenticated())
EXPERIMENT_SAMPLE_LABEL = 'my sample'
request.POST['sampleLabel'] = EXPERIMENT_SAMPLE_LABEL
request.FILES['fastq1'] = UploadedFile(
file=open(TEST_FQ1_FILE),
name='read1.fq')
request.FILES['fastq2'] = UploadedFile(
file=open(TEST_FQ2_FILE),
name='read2.fq')
response = upload_single_sample(request)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
self.assertFalse('error' in json.loads(response.content))
sample = ExperimentSample.objects.get(label=EXPERIMENT_SAMPLE_LABEL)
self.assertTrue(sample)
datasets = sample.dataset_set.all()
# num_datasets: 2 * fastq plus 2 * fastqc = 4
self.assertEqual(4, len(datasets))
for dataset in datasets:
self.assertEqual(Dataset.STATUS.READY, dataset.status)
def test_upload_single_sample__unpaired(self):
project = self.common_entities['project']
request = HttpRequest()
request.POST = {
'projectUid': project.uid
}
request.method = 'POST'
request.user = self.common_entities['user']
authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
self.assertTrue(request.user.is_authenticated())
EXPERIMENT_SAMPLE_LABEL = 'my sample'
request.POST['sampleLabel'] = EXPERIMENT_SAMPLE_LABEL
request.FILES['fastq1'] = UploadedFile(
file=open(TEST_FQ1_FILE),
name='read1.fq')
response = upload_single_sample(request)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
self.assertFalse('error' in json.loads(response.content))
sample = ExperimentSample.objects.get(label=EXPERIMENT_SAMPLE_LABEL)
self.assertTrue(sample)
datasets = sample.dataset_set.all()
# num_datasets: 1 fastq + 1 fastqc = 2
self.assertEqual(2, len(datasets))
for dataset in datasets:
self.assertEqual(Dataset.STATUS.READY, dataset.status)
class TestSamplesUploadThroughBrowserSampleData(TestCase):
def setUp(self):
"""Override.
"""
self.common_entities = create_common_entities()
def test_upload_file(self):
project = self.common_entities['project']
request = HttpRequest()
request.POST = {
'projectUid': project.uid
}
request.method = 'POST'
request.user = self.common_entities['user']
authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
self.assertTrue(request.user.is_authenticated())
# Fake having uploaded a template.
row_data = {
SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME: 'red',
SAMPLE_BROWSER_UPLOAD_KEY__READ_1: TEST_FQ1_FILE,
SAMPLE_BROWSER_UPLOAD_KEY__READ_2: TEST_FQ2_FILE
}
_create_sample_and_placeholder_dataset(project, row_data)
datasets = Dataset.objects.all()
self.assertEqual(2, len(datasets))
for dataset in datasets:
self.assertEqual(Dataset.STATUS.AWAITING_UPLOAD, dataset.status)
def _upload_file_and_check_response(full_path):
name = os.path.split(full_path)[1]
# Add mock file to request.
mock_uploaded_file = UploadedFile(
file=open(TEST_FQ1_FILE),
name=name)
request.FILES = {
'file': mock_uploaded_file
}
response = samples_upload_through_browser_sample_data(request)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
self.assertFalse('error' in json.loads(response.content))
_upload_file_and_check_response(TEST_FQ1_FILE)
_upload_file_and_check_response(TEST_FQ2_FILE)
datasets = Dataset.objects.all()
# 2 fastq, 2 fastqc
self.assertEqual(4, len(datasets))
EXPECTED_DATASET_TYPES_SET = set([
Dataset.TYPE.FASTQ1, Dataset.TYPE.FASTQ2,
Dataset.TYPE.FASTQC1_HTML, Dataset.TYPE.FASTQC2_HTML])
self.assertEqual(
EXPECTED_DATASET_TYPES_SET,
set([ds.type for ds in datasets]))
for dataset in datasets:
self.assertEqual(Dataset.STATUS.READY, dataset.status)
class TestVariantSetUploadThroughFile(TestCase):
def setUp(self):
"""Override.
"""
self.common_entities = create_common_entities()
def test_upload_file(self):
VARIANT_SET_NAME = 'newVariant'
self.assertEqual(0, VariantSet.objects.count())
refGenome = self.common_entities['reference_genome']
request = HttpRequest()
request.POST = {
'refGenomeUid': refGenome.uid,
'variantSetName': VARIANT_SET_NAME,
'createSetType': 'from-file'
}
request.method = 'POST'
request.user = self.common_entities['user']
authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
self.assertTrue(request.user.is_authenticated())
#random test file selected
variant_set_file = os.path.join(GD_ROOT, 'test_data',
'recoli_321UAG_variant_set_upload.vcf')
mock_uploaded_file = UploadedFile(
file=StringIO.StringIO(),
name=variant_set_file)
request.FILES['vcfFile'] = mock_uploaded_file
response = create_variant_set(request)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
variantsets = VariantSet.objects.all()
self.assertEqual(1, len(variantsets))
self.assertEqual(VARIANT_SET_NAME, VariantSet.objects.get().label)
self.assertEqual(refGenome, VariantSet.objects.get().reference_genome)
class TestReferenceGenomeConcatenation(TestCase):
def setUp(self):
"""Override.
"""
self.common_entities = create_common_entities()
def _generate_test_instance(self, rg_files, rg_names=None):
if rg_names is None:
rg_names = [str(i) for i in range(len(rg_files))]
project = self.common_entities['project']
ref_genomes = []
for i, rg_file in enumerate(rg_files):
file_type = 'fasta' if rg_file.endswith('.fa') else 'genbank'
ref_genomes.append(import_reference_genome_from_local_file(
project, rg_names[i], rg_file, file_type, move=False))
test_label = 'concat_test'
request_data = {
'newGenomeLabel': test_label,
'refGenomeUidList': [rg.uid for rg in ref_genomes]
}
request = HttpRequest()
request.POST = {'data': json.dumps(request_data)}
request.method = 'POST'
request.user = self.common_entities['user']
authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
self.assertTrue(request.user.is_authenticated())
ref_genomes_concatenate(request)
concat_ref = ReferenceGenome.objects.get(label=test_label)
# Assert correct number of chromosomes
self.assertEqual(
concat_ref.num_chromosomes,
sum([rg.num_chromosomes for rg in ref_genomes]))
# Assert correct number of bases
self.assertEqual(
concat_ref.num_bases,
sum([rg.num_bases for rg in ref_genomes]))
def test_fasta_concatenation(self):
""" Basic test of concatenating two short fastas
"""
self._generate_test_instance([TEST_FASTA_1_PATH, TEST_FASTA_2_PATH])
def test_identical_fasta_concatenation(self):
""" Test concatenating two identical fastas
"""
self._generate_test_instance([TEST_FASTA_1_PATH, TEST_FASTA_1_PATH])
def test_fasta_genbank_concatenation(self):
""" Test concatenating a fasta with a genbank
"""
self._generate_test_instance([TEST_FASTA_1_PATH, TEST_MG1655_GENBANK])
def test_multichromosome_concatenation(self):
""" Test concatenating a fasta containing a single chromosome with
a fasta containing two chromosomes
"""
self._generate_test_instance([TEST_FASTA_1_PATH,
TEST_2_CHROM_FASTA_PATH])
class TestUploadReferenceGenome(TestCase):
def setUp(self):
"""Override.
"""
self.common_entities = create_common_entities()
def test_upload_long_id_genbank(self):
project = self.common_entities['project']
ref_genome_label = 'dirty_upload'
request = HttpRequest()
request.POST = {
'projectUid': project.uid,
'refGenomeLabel': ref_genome_label,
'importFileFormat': 'genbank'
}
request.method = 'POST'
request.user = self.common_entities['user']
authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
self.assertTrue(request.user.is_authenticated())
request.FILES['refGenomeFile'] = UploadedFile(
file=open(LONG_ID_GENBANK),
name='dirty_genbank.gb')
response = create_ref_genome_from_browser_upload(request)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
self.assertFalse(json.loads(response.content).get('error', False))
def test_run_alignment_with_spaces_in_genbank_filename(self):
project = self.common_entities['project']
ref_genome_label = 'dirty_upload'
request = HttpRequest()
request.POST = {
'projectUid': project.uid,
'refGenomeLabel': ref_genome_label,
'importFileFormat': 'genbank'
}
request.method = 'POST'
request.user = self.common_entities['user']
authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
self.assertTrue(request.user.is_authenticated())
request.FILES['refGenomeFile'] = UploadedFile(
file=open(TEST_GENBANK),
name='dirty_genbank (spaces).gb')
response = create_ref_genome_from_browser_upload(request)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
self.assertFalse(json.loads(response.content).get('error', False))
# Get reference genome
ref_genome = ReferenceGenome.objects.get(
project=project,
label=ref_genome_label)
# Create sample model
sample = ExperimentSample.objects.create(
project=project,
label='test_sample')
# Add fastq datasets to sample
add_dataset_to_entity(
sample,
Dataset.TYPE.FASTQ1,
Dataset.TYPE.FASTQ1,
filesystem_location=TEST_DIRTY_FQ_1)
# Add fastq datasets to sample
add_dataset_to_entity(
sample,
Dataset.TYPE.FASTQ2,
Dataset.TYPE.FASTQ2,
filesystem_location=TEST_DIRTY_FQ_2)
# Run alignment of sample to reference
alignment_group_label = 'test_alignment'
sample_list = [sample]
result = run_pipeline(
alignment_group_label, ref_genome, sample_list)
alignment_group = result[0]
alignment_async_result = result[1]
variant_calling_async_result = result[2]
alignment_async_result.get()
variant_calling_async_result.get()
alignment_group = AlignmentGroup.objects.get(uid=alignment_group.uid)
self.assertEqual(AlignmentGroup.STATUS.COMPLETED,
alignment_group.status)
| woodymit/millstone | genome_designer/main/tests/test_xhr_handlers.py | Python | mit | 25,193 | [
"BWA"
] | 4aeddfc313aedaea480c7e70e1a0a466fd373c2ff0dfaba934000c133a642ab1 |
# ===========================================================
# RV PLOTS
# ===========================================================
# ===========================================================
# Plot RV timeseries
# ===========================================================
def plot_rv_timeseries():
# -------------------------------------------------------------------------------
# Convert factor from km to m
cfactor = np.float(1.e3)
data_rv_inst = [None]*nt
data_erv_inst = [None]*nt
for i in range(0, nt):
data_rv_inst[i] = list(rv_all[i])
data_erv_inst[i] = list(errs_all[i])
# -------------------------------------------------------------------------------
# CREATE MODEL TO PLOT
    # MODEL LENGTH IS THE WHOLE RANGE +/- 10%
xmin = min(np.concatenate(time_all))
xmax = max(np.concatenate(time_all))
total_tt = int(xmax - xmin)
add = total_tt*0.1
xmax = xmax + add
xmin = xmin - add
n = total_tt*100
if (n > 5000):
n = 5000
# CREATE TIME VECTOR
rvx = np.arange(xmin, xmax, (xmax-xmin)/n)
# COMPUTE MODEL WITH ALL THE PLANETS
rvy = pti.rv_curve_mp(rvx, 0.0, t0_val, k_val, P_val,
e_val, w_val, alpha_val, beta_val)
# -------------------------------------------------------------------------------
# CORRECT DATA FOR EACH SPECTROGRAPH OFFSET
rv_no_offset = [None]*nt
rv_residuals = [None]*nt
for j in range(0, nt):
# Remove the compute offset for instrument j
rv_no_offset[j] = np.asarray(data_rv_inst[j] - v_val[j])
# Store the model with the planets to compute the residuals for instrument j
rv_residuals[j] = pti.rv_curve_mp(time_all[j], 0.0, t0_val,
k_val, P_val, e_val, w_val, alpha_val, beta_val)
# Compute the residuals for the instrument j
rv_residuals[j] = np.asarray(rv_no_offset[j] - rv_residuals[j])
# -------------------------------------------------------------------------------
if kernel_rv[0:2] == 'MQ' or kernel_rv[0:2] == 'ME' or kernel_rv[0:2] == 'MM':
# How many timeseries do we have?
#ns = int((len(fit_krv) - 3)/2)
ns = int(kernel_rv[2])
# This vector contains all the timeseries (a TxN vector)
xvec = rv_time
# This vector contains the predicted timeseries for N cases
rvx_tot = np.concatenate([rvx]*ns)
# Let us create our vector with the residuals for the N timeseries
yvec = [None]*len(rv_vals)
# Our first chunk of timeseries is always the RV, in this case, we have to
# Create the residuals once removed the planet signals
yvec_noplanet = np.concatenate(rv_no_offset)
yvec_planet = np.concatenate(rv_residuals)
#The first elements of the array correspond to the GP, so we want the vector with no planets and no offset
for i in range(int(len(rv_vals)/ns)):
yvec[i] = yvec_planet[i]
        # Now, let us store the ancillary data vectors, so we want to remove only the offsets
for i in range(int(len(rv_vals)/ns), int(len(rv_vals))):
yvec[i] = yvec_noplanet[i]
evec = rv_errs
# Now, predict the GP for all the timeseries
m, C = pti.pred_gp(kernel_rv, pk_rv, xvec, yvec,
evec, rvx_tot, jrv, jrvlab)
# Let us remove the GP model from the data
m_gp, C_gp = pti.pred_gp(
kernel_rv, pk_rv, xvec, yvec, evec, xvec, jrv, jrvlab)
        # Now we can remove the GP model from the data
yvec = yvec - m_gp
# -----------------------------------------------------------------------------------
# Let us create random samples of data
if False:
nsamples = 1000
for j in range(0, nsamples):
# Create the Gaussian samples
y_dummy = np.random.normal(yvec, evec, len(yvec))
out_f = outdir+'/'+star+'_rv_'+str(j)+'.dat'
opars = open(out_f, 'w')
for k in range(0, len(yvec)):
opars.write('%4.7f %4.7f %4.7f %s \n' % (
xvec[k], y_dummy[k], evec[k], telescopes[tlab[k]]))
opars.close()
# -----------------------------------------------------------------------------------
#
# Let us create the vectors that we will use for the plots
plot_vector = [None]*ns
nts = len(rvx)
# This corresponds to the RV timeseries
plot_vector[0] = [rvx, (rvy)*cfactor, m[0:nts]
* cfactor, (rvy+m[0:nts])*cfactor,np.sqrt(np.matrix.diagonal(C[0:nts,0:nts]))*cfactor]
for i in range(1, ns):
plot_vector[i] = [rvx, m[i*nts:(i+1)*nts]*cfactor,np.sqrt(np.matrix.diagonal(C[i*nts:(i+1)*nts,i*nts:(i+1)*nts]))*cfactor]
# are we plotting a GP together with the RV curve
elif kernel_rv[0:2] != 'No':
xvec = rv_time
yvec = np.concatenate(rv_residuals)
evec = rv_errs
m, C = pti.pred_gp(kernel_rv, pk_rv, xvec,
yvec, evec, rvx, jrv, jrvlab)
rv_mvec = [rvx, (rvy)*cfactor, m*cfactor, (rvy+m)*cfactor]
model_labels = ['Planetary signal', 'GP', 'P+GP']
mcolors = ['r', 'b', 'k']
malpha = [0.7, 0.7, 0.9]
else:
rv_mvec = [rvx, (rvy)*cfactor]
model_labels = ['Full model']
mcolors = ['k']
malpha = [1.]
# Name of plot file
fname = outdir+'/'+star+'_rv_timeseries.pdf'
# Name of residuals file
out_f = outdir+'/'+star+'_rv_residuals.dat'
# Create the vectors to be used in create_nice_plot()
vec_x = np.concatenate(time_all)
#yvec_noplanet = np.concatenate(rv_residuals)
vec_y = np.concatenate(rv_residuals)
if kernel_rv[0:2] == 'MQ' or kernel_rv[0:2] == 'ME' or kernel_rv[0:2] == 'MM':
vec_y = np.array(yvec)
vec_z = np.concatenate(new_errs_all)/cfactor
#
xdata = vec_x
ydata = np.asarray(np.concatenate(rv_no_offset))*cfactor
edata = np.asarray(np.concatenate(errs_all))*cfactor
ejdata = np.asarray(np.concatenate(new_errs_all))
res = np.asarray(vec_y)*cfactor
if kernel_rv[0:2] == 'MQ' or kernel_rv[0:2] == 'ME' or kernel_rv[0:2] == 'MM':
ns = int(kernel_rv[2])
ts_len = int(len(xdata)/ns)
# Create the vector with the data needed in the create_nice_plot function
for o in range(0, ns):
rv_dvec = [xdata[o*ts_len:(o+1)*ts_len], ydata[o*ts_len:(o+1)*ts_len], edata[o*ts_len:(o+1)*ts_len],
ejdata[o*ts_len:(o+1)*ts_len], res[o*ts_len:(o+1)*ts_len], tlab[o*ts_len:(o+1)*ts_len]]
rv_dvecnp = np.asarray(rv_dvec)
mvec = plot_vector[o]
mvecnp = np.asarray(mvec)
np.savetxt(outdir+'/timeseries_model'+str(o) +
'.dat', mvecnp.T, fmt='%8.8f')
np.savetxt(outdir+'/timeseries_data'+str(o)+'.dat', rv_dvecnp.T, fmt='%8.8f %8.8f %8.8f %8.8f %8.8f %i',
header='time rv erv jitter rvnoplanet tlab', comments='#')
if o == 0:
model_labels = ['Planetary signal', 'GP', 'P+GP']
mcolors = ['r', 'b', 'k']
malpha = [0.7, 0.7, 0.9]
fname = outdir+'/'+star+'_rv_timeseries.pdf'
else:
model_labels = ['GP timeseries'+str(o+1)]
mcolors = ['k']
malpha = [0.9]
fname = outdir+'/'+star+'_timeseries'+str(o+1)+'.pdf'
if (len(rv_labels) == 1):
plot_labels_rv = [rv_xlabel, 'RV (m/s)', 'Residuals (m/s)']
else:
plot_labels_rv = [
rv_xlabel[o*ns:(o+1)*ns], rv_labels[o], rv_res[o]]
            # mvec[:-1] drops the model standard deviation (last element), which is passed separately as std_model
create_nice_plot(mvec[:-1], rv_dvec, plot_labels_rv, model_labels, telescopes_labels, fname, std_model=mvec[-1],
plot_residuals=False, fsx=2*fsx, model_colors=mcolors, model_alpha=malpha,colors=rv_colors)
else:
rv_dvec = [xdata, ydata, edata, ejdata, res, tlab]
rv_dvecnp = np.asarray(rv_dvec)
mvecnp = np.asarray(rv_mvec)
np.savetxt(outdir+'/timeseries_model_rv.dat', mvecnp.T, fmt='%8.8f')
np.savetxt(outdir+'/timeseries_data_rv.dat', rv_dvecnp.T, fmt='%8.8f %8.8f %8.8f %8.8f %8.8f %i',
header='time rv erv jitter rvnoplanet tlab', comments='#')
plot_labels_rv = [rv_xlabel, 'RV (m/s)', 'Residuals (m/s)']
# Create the RV timeseries plot
create_nice_plot(rv_mvec, rv_dvec, plot_labels_rv, model_labels, telescopes_labels, fname,
plot_residuals=False, fsx=2*fsx, model_colors=mcolors, model_alpha=malpha,colors=rv_colors)
# Create residuals file
of = open(out_f, 'w')
for i in range(0, len(vec_x)):
of.write(' %8.8f %8.8f %8.8f %s \n' % (vec_x[i], res[i]*1e-3, vec_z[i], telescopes_labels[tlab[i]]))
of.close()
return rv_residuals
# ===========================================================
# RV multi-planet fit
# ===========================================================
def plot_rv_phasefolded():
cfactor = np.float(1.e3)
for i in range(0, nplanets):
# Create the RV fitted model for the planet i
rvx = np.arange(t0_val[i], t0_val[i]+P_val[i]*0.999, P_val[i]/4999.)
rvy = pti.rv_curve_mp(
rvx, 0.0, t0_val[i], k_val[i], P_val[i], e_val[i], w_val[i], 0.0, 0.0)
# rvx and rvy are the model timeseries for planet i
#Let us compute the shadow region for the RV plots
rv_std = []
if plot_rv_std:
#Compute 1000 random models from the samples
rvy_vec = [None]*1000
for j in range(1000):
my_j = np.random.randint(len(T0_vec[i]))
rvy_vec[j] = pti.rv_curve_mp(
rvx, 0.0, T0_vec[i][j], k_vec[i][j], P_vec[i][j], e_vec[i][j], w_vec[i][j], 0.0, 0.0)
#Compute the standard deviation of the models sample
rv_std = np.std(rvy_vec,axis=0)
rv_std *= cfactor
# Now it is time to remove the planets j != i from the data
rv_pi = pti.rv_curve_mp(
rv_time, 0.0, t0_val[i], k_val[i], P_val[i], e_val[i], w_val[i], 0., 0.)
# This variable has all the planets
rv_pall = pti.rv_curve_mp(
rv_time, 0.0, t0_val, k_val, P_val, e_val, w_val, 0.0, 0.0)
# Let us remove all the signals from the data
res = np.zeros(shape=len(rv_vals))
for m in range(0, len(rv_vals)):
res[m] = rv_vals[m] - v_val[tlab[m]] - rv_pall[m] \
- alpha_val*(rv_time[m] - t0_val[0]) - \
beta_val*(rv_time[m] - t0_val[0])**2
# Let us add the signal of the planet i to the data
rv_planet_i = res + rv_pi
# Did we fit for a GP?
evec = np.asarray(rv_errs)
if kernel_rv[0:2] != 'No':
xvec = rv_time
yvec = res
kernel_val, C = pti.pred_gp(
kernel_rv, pk_rv, xvec, yvec, evec, xvec, jrv, jrvlab)
res = res - kernel_val
rv_planet_i = rv_planet_i - kernel_val
rvy = np.asarray(rvy)*cfactor
res = np.asarray(res)*cfactor
rv_planet_i = np.asarray(rv_planet_i)*cfactor
evec = evec*cfactor
ejvec = np.concatenate(new_errs_all)
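        # Fold the model and data time stamps into the orbital phase of planet i.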
p_rv = scale_period(rvx, t0_val[i], P_val[i])
p_all = scale_period(rv_time, t0_val[i], P_val[i])
fname = outdir+'/'+star+plabels[i]+'_rv.pdf'
# plot_rv_fancy([p_rv,rvy,p_all,rv_planet_i,evec,ejvec,res,tlab],fname)
rv_dvec = np.array([p_all, rv_planet_i, evec, ejvec, res, tlab])
tellabs = telescopes_labels
if kernel_rv[0:2] == 'MQ' or kernel_rv[0:2] == 'ME' or kernel_rv[0:2] == 'MM':
ns = int(kernel_rv[2])
nd = int(len(p_all)/ns)
rv_dvec = [p_all[0:nd], rv_planet_i[0:nd],
evec[0:nd], ejvec[0:nd], res[0:nd], tlab[0:nd]]
tellabs = [0]
tellabs = telescopes_labels[0:int((len(telescopes_labels))/ns)]
rv_dvec = np.array(rv_dvec)
rv_mvec = np.array([p_rv, rvy])
plot_labels_rv = ['Orbital phase', 'RV (m/s)', 'Residuals (m/s)']
model_labels = ['']
np.savetxt(fname[:-4]+'-data.dat',rv_dvec.T,header='phase rv_planet'+plabels[i]+'(m/s) eRV(m/s) eRV_with_jitter(m/s) residuals(m/s) instrument',
fmt='%4.8f %4.8f %4.8f %4.8f %4.8f %i')
np.savetxt(fname[:-4]+'-model.dat',rv_mvec.T,header='phase rv_planet'+plabels[i]+'(m/s)',fmt='%4.8f %8.8f')
create_nice_plot(rv_mvec, rv_dvec, plot_labels_rv,
model_labels, tellabs, fname,colors=rv_colors,std_model=rv_std)
| oscaribv/pyaneti | src/plot_rv.py | Python | gpl-3.0 | 12,995 | [
"Gaussian"
] | 85828293cd558555016990163bd244f52203cdb498555b5d85af5ca5024e5c9e |
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
    Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class StateNetworkSizeResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, meta=None, network_sizes=None):
"""
StateNetworkSizeResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'meta': 'Meta',
'network_sizes': 'list[NetworkSize]'
}
self.attribute_map = {
'meta': 'meta',
'network_sizes': 'network_sizes'
}
self._meta = meta
self._network_sizes = network_sizes
@property
def meta(self):
"""
Gets the meta of this StateNetworkSizeResponse.
Meta-data
:return: The meta of this StateNetworkSizeResponse.
:rtype: Meta
"""
return self._meta
@meta.setter
def meta(self, meta):
"""
Sets the meta of this StateNetworkSizeResponse.
Meta-data
:param meta: The meta of this StateNetworkSizeResponse.
:type: Meta
"""
self._meta = meta
@property
def network_sizes(self):
"""
Gets the network_sizes of this StateNetworkSizeResponse.
Network Sizes
:return: The network_sizes of this StateNetworkSizeResponse.
:rtype: list[NetworkSize]
"""
return self._network_sizes
@network_sizes.setter
def network_sizes(self, network_sizes):
"""
Sets the network_sizes of this StateNetworkSizeResponse.
Network Sizes
:param network_sizes: The network_sizes of this StateNetworkSizeResponse.
:type: list[NetworkSize]
"""
self._network_sizes = network_sizes
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| vericred/vericred-python | vericred_client/models/state_network_size_response.py | Python | apache-2.0 | 12,891 | [
"VisIt"
] | c0637598387358186445136a44bd8e8dd996c9343394f9d13728e0957d50e57b |
import mmap
from bup import _helpers
from bup.helpers import *
MIDX_VERSION = 4
extract_bits = _helpers.extract_bits
_total_searches = 0
_total_steps = 0
class PackMidx:
"""Wrapper which contains data from multiple index files.
Multiple index (.midx) files constitute a wrapper around index (.idx) files
    and make it possible for bup to extend Git's indexing capabilities to vast
    numbers of files.
"""
def __init__(self, filename):
self.name = filename
self.force_keep = False
assert(filename.endswith('.midx'))
self.map = mmap_read(open(filename))
if str(self.map[0:4]) != 'MIDX':
log('Warning: skipping: invalid MIDX header in %r\n' % filename)
self.force_keep = True
return self._init_failed()
ver = struct.unpack('!I', self.map[4:8])[0]
if ver < MIDX_VERSION:
log('Warning: ignoring old-style (v%d) midx %r\n'
% (ver, filename))
self.force_keep = False # old stuff is boring
return self._init_failed()
if ver > MIDX_VERSION:
log('Warning: ignoring too-new (v%d) midx %r\n'
% (ver, filename))
self.force_keep = True # new stuff is exciting
return self._init_failed()
self.bits = _helpers.firstword(self.map[8:12])
self.entries = 2**self.bits
self.fanout = buffer(self.map, 12, self.entries*4)
self.sha_ofs = 12 + self.entries*4
self.nsha = nsha = self._fanget(self.entries-1)
self.shatable = buffer(self.map, self.sha_ofs, nsha*20)
self.which_ofs = self.sha_ofs + 20*nsha
self.whichlist = buffer(self.map, self.which_ofs, nsha*4)
self.idxnames = str(self.map[self.which_ofs + 4*nsha:]).split('\0')
def _init_failed(self):
self.bits = 0
self.entries = 1
self.fanout = buffer('\0\0\0\0')
self.shatable = buffer('\0'*20)
self.idxnames = []
def _fanget(self, i):
start = i*4
s = self.fanout[start:start+4]
return _helpers.firstword(s)
def _get(self, i):
return str(self.shatable[i*20:(i+1)*20])
def _get_idx_i(self, i):
return struct.unpack('!I', self.whichlist[i*4:(i+1)*4])[0]
def _get_idxname(self, i):
return self.idxnames[self._get_idx_i(i)]
def exists(self, hash, want_source=False):
"""Return nonempty if the object exists in the index files."""
global _total_searches, _total_steps
_total_searches += 1
want = str(hash)
el = extract_bits(want, self.bits)
if el:
start = self._fanget(el-1)
startv = el << (32-self.bits)
else:
start = 0
startv = 0
end = self._fanget(el)
endv = (el+1) << (32-self.bits)
_total_steps += 1 # lookup table is a step
hashv = _helpers.firstword(hash)
#print '(%08x) %08x %08x %08x' % (extract_bits(want, 32), startv, hashv, endv)
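        # Interpolation search: estimate where the wanted SHA-1 should fall
        # between start and end from its 32-bit prefix and narrow the bracket
        # around that guess; for (roughly uniform) hashes this needs far fewer
        # probes than plain bisection.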
while start < end:
_total_steps += 1
#print '! %08x %08x %08x %d - %d' % (startv, hashv, endv, start, end)
mid = start + (hashv-startv)*(end-start-1)/(endv-startv)
#print ' %08x %08x %08x %d %d %d' % (startv, hashv, endv, start, mid, end)
v = self._get(mid)
#print ' %08x' % self._num(v)
if v < want:
start = mid+1
startv = _helpers.firstword(v)
elif v > want:
end = mid
endv = _helpers.firstword(v)
else: # got it!
return want_source and self._get_idxname(mid) or True
return None
def __iter__(self):
for i in xrange(self._fanget(self.entries-1)):
yield buffer(self.shatable, i*20, 20)
def __len__(self):
return int(self._fanget(self.entries-1))
| fengyuanjs/catawampus | tr/vendor/bup/lib/bup/midx.py | Python | apache-2.0 | 3,935 | [
"exciting"
] | 3956a10e63f3fe08a09ff74ea5465b63343db0ac09b562c19f4fcc46f5442e2e |
from os import path, makedirs
import itertools
from netCDF4 import Dataset
import sys
import time
from gusto.diagnostics import Diagnostics, Perturbation, SteadyStateError
from firedrake import (FiniteElement, TensorProductElement, HDiv, DirichletBC,
FunctionSpace, MixedFunctionSpace, VectorFunctionSpace,
interval, Function, Mesh, functionspaceimpl,
File, SpatialCoordinate, sqrt, Constant, inner,
op2, DumbCheckpoint, FILE_CREATE, FILE_READ, interpolate,
CellNormal, cross, as_vector)
import numpy as np
from gusto.configuration import logger, set_log_handler
__all__ = ["State"]
class SpaceCreator(object):
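    # Lazily builds FunctionSpaces and caches them as attributes: the first
    # call with a given name constructs the space, later calls return it.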
def __call__(self, name, mesh=None, family=None, degree=None):
try:
return getattr(self, name)
except AttributeError:
value = FunctionSpace(mesh, family, degree)
setattr(self, name, value)
return value
class FieldCreator(object):
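    # Registry of the model's Functions: prognostic fields are views into the
    # mixed state function xn, while __call__ creates (and then caches) any
    # extra field on demand, tagging each with its dump/pickup flags.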
def __init__(self, fieldlist=None, xn=None, dumplist=None, pickup=True):
self.fields = []
if fieldlist is not None:
for name, func in zip(fieldlist, xn.split()):
setattr(self, name, func)
func.dump = name in dumplist
func.pickup = pickup
func.rename(name)
self.fields.append(func)
def __call__(self, name, space=None, dump=True, pickup=True):
try:
return getattr(self, name)
except AttributeError:
value = Function(space, name=name)
setattr(self, name, value)
value.dump = dump
value.pickup = pickup
self.fields.append(value)
return value
def __iter__(self):
return iter(self.fields)
class PointDataOutput(object):
def __init__(self, filename, ndt, field_points, description,
field_creator, comm, create=True):
"""Create a dump file that stores fields evaluated at points.
:arg filename: The filename.
:arg field_points: Iterable of pairs (field_name, evaluation_points).
:arg description: Description of the simulation.
:arg field_creator: The field creator (only used to determine
datatype and shape of fields).
:kwarg create: If False, assume that filename already exists
"""
# Overwrite on creation.
self.dump_count = 0
self.filename = filename
self.field_points = field_points
self.comm = comm
if not create:
return
if self.comm.rank == 0:
with Dataset(filename, "w") as dataset:
dataset.description = "Point data for simulation {desc}".format(desc=description)
dataset.history = "Created {t}".format(t=time.ctime())
# FIXME add versioning information.
dataset.source = "Output from Gusto model"
# Appendable dimension, timesteps in the model
dataset.createDimension("time", None)
var = dataset.createVariable("time", np.float64, ("time"))
var.units = "seconds"
# Now create the variable group for each field
for field_name, points in field_points:
group = dataset.createGroup(field_name)
npts, dim = points.shape
group.createDimension("points", npts)
group.createDimension("geometric_dimension", dim)
var = group.createVariable("points", points.dtype,
("points", "geometric_dimension"))
var[:] = points
# Get the UFL shape of the field
field_shape = field_creator(field_name).ufl_shape
                    # Number of geometric dimension occurrences should be the same as the length of the UFL shape
field_len = len(field_shape)
field_count = field_shape.count(dim)
assert field_len == field_count, "Geometric dimension occurrences do not match UFL shape"
# Create the variable with the required shape
dimensions = ("time", "points") + field_count*("geometric_dimension",)
group.createVariable(field_name, field_creator(field_name).dat.dtype, dimensions)
def dump(self, field_creator, t):
"""Evaluate and dump field data at points.
:arg field_creator: :class:`FieldCreator` for accessing
fields.
:arg t: Simulation time at which dump occurs.
"""
val_list = []
for field_name, points in self.field_points:
val_list.append((field_name, np.asarray(field_creator(field_name).at(points))))
if self.comm.rank == 0:
with Dataset(self.filename, "a") as dataset:
# Add new time index
dataset.variables["time"][self.dump_count] = t
for field_name, vals in val_list:
group = dataset.groups[field_name]
var = group.variables[field_name]
var[self.dump_count, :] = vals
self.dump_count += 1
class DiagnosticsOutput(object):
def __init__(self, filename, diagnostics, description, comm, create=True):
"""Create a dump file that stores diagnostics.
:arg filename: The filename.
:arg diagnostics: The :class:`Diagnostics` object.
:arg description: A description.
:kwarg create: If False, assume that filename already exists
"""
self.filename = filename
self.diagnostics = diagnostics
self.comm = comm
if not create:
return
if self.comm.rank == 0:
with Dataset(filename, "w") as dataset:
dataset.description = "Diagnostics data for simulation {desc}".format(desc=description)
dataset.history = "Created {t}".format(t=time.ctime())
dataset.source = "Output from Gusto model"
dataset.createDimension("time", None)
var = dataset.createVariable("time", np.float64, ("time", ))
var.units = "seconds"
for name in diagnostics.fields:
group = dataset.createGroup(name)
for diagnostic in diagnostics.available_diagnostics:
group.createVariable(diagnostic, np.float64, ("time", ))
def dump(self, state, t):
"""Dump diagnostics.
:arg state: The :class:`State` at which to compute the diagnostic.
:arg t: The current time.
"""
diagnostics = []
for fname in self.diagnostics.fields:
field = state.fields(fname)
for dname in self.diagnostics.available_diagnostics:
diagnostic = getattr(self.diagnostics, dname)
diagnostics.append((fname, dname, diagnostic(field)))
if self.comm.rank == 0:
with Dataset(self.filename, "a") as dataset:
idx = dataset.dimensions["time"].size
dataset.variables["time"][idx:idx + 1] = t
for fname, dname, value in diagnostics:
group = dataset.groups[fname]
var = group.variables[dname]
var[idx:idx + 1] = value
class State(object):
"""
Build a model state to keep the variables in, and specify parameters.
:arg mesh: The :class:`Mesh` to use.
:arg vertical_degree: integer, required for vertically extruded meshes.
Specifies the degree for the pressure space in the vertical
(the degrees for other spaces are inferred). Defaults to None.
:arg horizontal_degree: integer, the degree for spaces in the horizontal
(specifies the degree for the pressure space, other spaces are inferred)
defaults to 1.
:arg family: string, specifies the velocity space family to use.
Options:
"RT": The Raviart-Thomas family (default, recommended for quads)
"BDM": The BDM family
"BDFM": The BDFM family
:arg Coriolis: (optional) Coriolis function.
:arg sponge_function: (optional) Function specifying a sponge layer.
:arg timestepping: class containing timestepping parameters
:arg output: class containing output parameters
:arg parameters: class containing physical parameters
:arg diagnostics: class containing diagnostic methods
:arg fieldlist: list of prognostic field names
:arg diagnostic_fields: list of diagnostic field classes
:arg u_bc_ids: a list containing the ids of boundaries with no normal
component of velocity. These ids are passed to `DirichletBC`s. For
extruded meshes, top and bottom are added automatically.
"""
def __init__(self, mesh, vertical_degree=None, horizontal_degree=1,
family="RT",
Coriolis=None, sponge_function=None,
hydrostatic=None,
timestepping=None,
output=None,
parameters=None,
diagnostics=None,
fieldlist=None,
diagnostic_fields=None,
u_bc_ids=None):
self.family = family
self.vertical_degree = vertical_degree
self.horizontal_degree = horizontal_degree
self.Omega = Coriolis
self.mu = sponge_function
self.hydrostatic = hydrostatic
self.timestepping = timestepping
if output is None:
raise RuntimeError("You must provide a directory name for dumping results")
else:
self.output = output
self.parameters = parameters
if fieldlist is None:
raise RuntimeError("You must provide a fieldlist containing the names of the prognostic fields")
else:
self.fieldlist = fieldlist
if diagnostics is not None:
self.diagnostics = diagnostics
else:
self.diagnostics = Diagnostics(*fieldlist)
if diagnostic_fields is not None:
self.diagnostic_fields = diagnostic_fields
else:
self.diagnostic_fields = []
if u_bc_ids is not None:
self.u_bc_ids = u_bc_ids
else:
self.u_bc_ids = []
# The mesh
self.mesh = mesh
# Build the spaces
self._build_spaces(mesh, vertical_degree, horizontal_degree, family)
# Allocate state
self._allocate_state()
if self.output.dumplist is None:
self.output.dumplist = fieldlist
self.fields = FieldCreator(fieldlist, self.xn, self.output.dumplist)
# set up bcs
V = self.fields('u').function_space()
self.bcs = []
if V.extruded:
self.bcs.append(DirichletBC(V, 0.0, "bottom"))
self.bcs.append(DirichletBC(V, 0.0, "top"))
for id in self.u_bc_ids:
self.bcs.append(DirichletBC(V, 0.0, id))
self.dumpfile = None
# figure out if we're on a sphere
try:
self.on_sphere = (mesh._base_mesh.geometric_dimension() == 3 and mesh._base_mesh.topological_dimension() == 2)
except AttributeError:
self.on_sphere = (mesh.geometric_dimension() == 3 and mesh.topological_dimension() == 2)
# build the vertical normal and define perp for 2d geometries
dim = mesh.topological_dimension()
if self.on_sphere:
x = SpatialCoordinate(mesh)
R = sqrt(inner(x, x))
self.k = interpolate(x/R, mesh.coordinates.function_space())
if dim == 2:
outward_normals = CellNormal(mesh)
self.perp = lambda u: cross(outward_normals, u)
else:
kvec = [0.0]*dim
kvec[dim-1] = 1.0
self.k = Constant(kvec)
if dim == 2:
self.perp = lambda u: as_vector([-u[1], u[0]])
# project test function for hydrostatic case
if self.hydrostatic:
self.h_project = lambda u: u - self.k*inner(u, self.k)
else:
self.h_project = lambda u: u
# Constant to hold current time
self.t = Constant(0.0)
# setup logger
logger.setLevel(output.log_level)
set_log_handler(mesh.comm)
logger.info("Timestepping parameters that take non-default values:")
logger.info(", ".join("%s: %s" % item for item in vars(timestepping).items()))
if parameters is not None:
logger.info("Physical parameters that take non-default values:")
logger.info(", ".join("%s: %s" % item for item in vars(parameters).items()))
def setup_diagnostics(self):
"""
Add special case diagnostic fields
"""
for name in self.output.perturbation_fields:
f = Perturbation(name)
self.diagnostic_fields.append(f)
for name in self.output.steady_state_error_fields:
f = SteadyStateError(self, name)
self.diagnostic_fields.append(f)
fields = set([f.name() for f in self.fields])
field_deps = [(d, sorted(set(d.required_fields).difference(fields),)) for d in self.diagnostic_fields]
schedule = topo_sort(field_deps)
self.diagnostic_fields = schedule
for diagnostic in self.diagnostic_fields:
diagnostic.setup(self)
self.diagnostics.register(diagnostic.name)
def setup_dump(self, t, tmax, pickup=False):
"""
Setup dump files
Check for existence of directory so as not to overwrite
output files
Setup checkpoint file
:arg tmax: model stop time
:arg pickup: recover state from the checkpointing file if true,
otherwise dump and checkpoint to disk. (default is False).
"""
if any([self.output.dump_vtus, self.output.dumplist_latlon,
self.output.dump_diagnostics, self.output.point_data,
self.output.checkpoint and not pickup]):
# setup output directory and check that it does not already exist
self.dumpdir = path.join("results", self.output.dirname)
running_tests = '--running-tests' in sys.argv or "pytest" in self.output.dirname
if self.mesh.comm.rank == 0:
if not running_tests and path.exists(self.dumpdir) and not pickup:
raise IOError("results directory '%s' already exists"
% self.dumpdir)
else:
if not running_tests:
makedirs(self.dumpdir)
if self.output.dump_vtus:
# setup pvd output file
outfile = path.join(self.dumpdir, "field_output.pvd")
self.dumpfile = File(
outfile, project_output=self.output.project_fields,
comm=self.mesh.comm)
# make list of fields to dump
self.to_dump = [field for field in self.fields if field.dump]
# make dump counter
self.dumpcount = itertools.count()
# if there are fields to be dumped in latlon coordinates,
# setup the latlon coordinate mesh and make output file
if len(self.output.dumplist_latlon) > 0:
mesh_ll = get_latlon_mesh(self.mesh)
outfile_ll = path.join(self.dumpdir, "field_output_latlon.pvd")
self.dumpfile_ll = File(outfile_ll,
project_output=self.output.project_fields,
comm=self.mesh.comm)
# make functions on latlon mesh, as specified by dumplist_latlon
self.to_dump_latlon = []
for name in self.output.dumplist_latlon:
f = self.fields(name)
field = Function(
functionspaceimpl.WithGeometry(
f.function_space(), mesh_ll),
val=f.topological, name=name+'_ll')
self.to_dump_latlon.append(field)
# we create new netcdf files to write to, unless pickup=True, in
# which case we just need the filenames
if self.output.dump_diagnostics:
diagnostics_filename = self.dumpdir+"/diagnostics.nc"
self.diagnostic_output = DiagnosticsOutput(diagnostics_filename,
self.diagnostics,
self.output.dirname,
self.mesh.comm,
create=not pickup)
if len(self.output.point_data) > 0:
pointdata_filename = self.dumpdir+"/point_data.nc"
ndt = int(tmax/self.timestepping.dt)
self.pointdata_output = PointDataOutput(pointdata_filename, ndt,
self.output.point_data,
self.output.dirname,
self.fields,
self.mesh.comm,
create=not pickup)
# if we want to checkpoint and are not picking up from a previous
# checkpoint file, setup the dumb checkpointing
if self.output.checkpoint and not pickup:
self.chkpt = DumbCheckpoint(path.join(self.dumpdir, "chkpt"),
mode=FILE_CREATE)
# make list of fields to pickup (this doesn't include
# diagnostic fields)
self.to_pickup = [field for field in self.fields if field.pickup]
# if we want to checkpoint then make a checkpoint counter
if self.output.checkpoint:
self.chkptcount = itertools.count()
# dump initial fields
self.dump(t)
def pickup_from_checkpoint(self):
"""
        Pick up the state from the checkpoint file and return the model time
        stored in it.
"""
if self.output.checkpoint:
# Open the checkpointing file for writing
chkfile = path.join(self.dumpdir, "chkpt")
with DumbCheckpoint(chkfile, mode=FILE_READ) as chk:
# Recover all the fields from the checkpoint
for field in self.to_pickup:
chk.load(field)
t = chk.read_attribute("/", "time")
next(self.dumpcount)
# Setup new checkpoint
self.chkpt = DumbCheckpoint(path.join(self.dumpdir, "chkpt"), mode=FILE_CREATE)
else:
raise ValueError("Must set checkpoint True if pickup")
return t
def dump(self, t):
"""
Dump output
"""
output = self.output
# Diagnostics:
# Compute diagnostic fields
for field in self.diagnostic_fields:
field(self)
if output.dump_diagnostics:
# Output diagnostic data
self.diagnostic_output.dump(self, t)
if len(output.point_data) > 0:
# Output pointwise data
self.pointdata_output.dump(self.fields, t)
# Dump all the fields to the checkpointing file (backup version)
if output.checkpoint and (next(self.chkptcount) % output.chkptfreq) == 0:
for field in self.to_pickup:
self.chkpt.store(field)
self.chkpt.write_attribute("/", "time", t)
if output.dump_vtus and (next(self.dumpcount) % output.dumpfreq) == 0:
# dump fields
self.dumpfile.write(*self.to_dump)
# dump fields on latlon mesh
if len(output.dumplist_latlon) > 0:
self.dumpfile_ll.write(*self.to_dump_latlon)
def initialise(self, initial_conditions):
"""
Initialise state variables
:arg initial_conditions: An iterable of pairs (field_name, pointwise_value)
"""
for name, ic in initial_conditions:
f_init = getattr(self.fields, name)
f_init.assign(ic)
f_init.rename(name)
def set_reference_profiles(self, reference_profiles):
"""
Initialise reference profiles
:arg reference_profiles: An iterable of pairs (field_name, interpolatory_value)
"""
for name, profile in reference_profiles:
field = getattr(self.fields, name)
ref = self.fields(name+'bar', field.function_space(), False)
ref.interpolate(profile)
def _build_spaces(self, mesh, vertical_degree, horizontal_degree, family):
"""
Build:
velocity space self.V2,
pressure space self.V3,
temperature space self.Vt,
mixed function space self.W = (V2,V3,Vt)
"""
self.spaces = SpaceCreator()
if vertical_degree is not None:
# horizontal base spaces
cell = mesh._base_mesh.ufl_cell().cellname()
S1 = FiniteElement(family, cell, horizontal_degree+1)
S2 = FiniteElement("DG", cell, horizontal_degree, variant="equispaced")
# vertical base spaces
T0 = FiniteElement("CG", interval, vertical_degree+1, variant="equispaced")
T1 = FiniteElement("DG", interval, vertical_degree, variant="equispaced")
# build spaces V2, V3, Vt
V2h_elt = HDiv(TensorProductElement(S1, T1))
V2t_elt = TensorProductElement(S2, T0)
V3_elt = TensorProductElement(S2, T1)
V2v_elt = HDiv(V2t_elt)
V2_elt = V2h_elt + V2v_elt
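            # The velocity element sums the horizontal and vertical HDiv
            # pieces: the standard compatible (mimetic) finite-element setup
            # on extruded meshes, paired with the DG pressure element V3_elt
            # and the partially continuous temperature element V2t_elt.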
V0 = self.spaces("HDiv", mesh, V2_elt)
V1 = self.spaces("DG", mesh, V3_elt)
V2 = self.spaces("HDiv_v", mesh, V2t_elt)
self.Vv = self.spaces("Vv", mesh, V2v_elt)
DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
DG1_vert_elt = FiniteElement("DG", interval, 1, variant="equispaced")
DG1_elt = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
self.DG1_space = self.spaces("DG1", mesh, DG1_elt)
self.W = MixedFunctionSpace((V0, V1, V2))
else:
cell = mesh.ufl_cell().cellname()
V1_elt = FiniteElement(family, cell, horizontal_degree+1)
DG_elt = FiniteElement("DG", cell, horizontal_degree, variant="equispaced")
DG1_elt = FiniteElement("DG", cell, 1, variant="equispaced")
V0 = self.spaces("HDiv", mesh, V1_elt)
V1 = self.spaces("DG", mesh, DG_elt)
self.DG1_space = self.spaces("DG1", mesh, DG1_elt)
self.W = MixedFunctionSpace((V0, V1))
def _allocate_state(self):
"""
Construct Functions to store the state variables.
"""
W = self.W
self.xn = Function(W)
self.xstar = Function(W)
self.xp = Function(W)
self.xnp1 = Function(W)
self.xrhs = Function(W)
self.xb = Function(W) # store the old state for diagnostics
self.dy = Function(W)
def get_latlon_mesh(mesh):
coords_orig = mesh.coordinates
coords_fs = coords_orig.function_space()
if coords_fs.extruded:
cell = mesh._base_mesh.ufl_cell().cellname()
DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
DG1_vert_elt = FiniteElement("DG", interval, 1, variant="equispaced")
DG1_elt = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
else:
cell = mesh.ufl_cell().cellname()
DG1_elt = FiniteElement("DG", cell, 1, variant="equispaced")
vec_DG1 = VectorFunctionSpace(mesh, DG1_elt)
coords_dg = Function(vec_DG1).interpolate(coords_orig)
coords_latlon = Function(vec_DG1)
shapes = {"nDOFs": vec_DG1.finat_element.space_dimension(), 'dim': 3}
radius = np.min(np.sqrt(coords_dg.dat.data[:, 0]**2 + coords_dg.dat.data[:, 1]**2 + coords_dg.dat.data[:, 2]**2))
# lat-lon 'x' = atan2(y, x)
coords_latlon.dat.data[:, 0] = np.arctan2(coords_dg.dat.data[:, 1], coords_dg.dat.data[:, 0])
# lat-lon 'y' = asin(z/sqrt(x^2 + y^2 + z^2))
coords_latlon.dat.data[:, 1] = np.arcsin(coords_dg.dat.data[:, 2]/np.sqrt(coords_dg.dat.data[:, 0]**2 + coords_dg.dat.data[:, 1]**2 + coords_dg.dat.data[:, 2]**2))
# our vertical coordinate is radius - the minimum radius
coords_latlon.dat.data[:, 2] = np.sqrt(coords_dg.dat.data[:, 0]**2 + coords_dg.dat.data[:, 1]**2 + coords_dg.dat.data[:, 2]**2) - radius
# We need to ensure that all points in a cell are on the same side of the branch cut in longitude coords
# This kernel amends the longitude coords so that all longitudes in one cell are close together
kernel = op2.Kernel("""
#define PI 3.141592653589793
#define TWO_PI 6.283185307179586
void splat_coords(double *coords) {{
double max_diff = 0.0;
double diff = 0.0;
for (int i=0; i<{nDOFs}; i++) {{
for (int j=0; j<{nDOFs}; j++) {{
diff = coords[i*{dim}] - coords[j*{dim}];
if (fabs(diff) > max_diff) {{
max_diff = diff;
}}
}}
}}
if (max_diff > PI) {{
for (int i=0; i<{nDOFs}; i++) {{
if (coords[i*{dim}] < 0) {{
coords[i*{dim}] += TWO_PI;
}}
}}
}}
}}
""".format(**shapes), "splat_coords")
op2.par_loop(kernel, coords_latlon.cell_set,
coords_latlon.dat(op2.RW, coords_latlon.cell_node_map()))
return Mesh(coords_latlon)
def topo_sort(field_deps):
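    # Kahn's algorithm: schedule fields whose dependencies are already
    # available, remove their outgoing edges, and repeat, so each diagnostic
    # is evaluated after everything it requires; any edge left over at the
    # end indicates a dependency cycle.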
name2field = dict((f.name, f) for f, _ in field_deps)
# map node: (input_deps, output_deps)
graph = dict((f.name, (list(deps), [])) for f, deps in field_deps)
roots = []
for f, input_deps in field_deps:
if len(input_deps) == 0:
# No dependencies, candidate for evaluation
roots.append(f.name)
for d in input_deps:
# add f as output dependency
graph[d][1].append(f.name)
schedule = []
while roots:
n = roots.pop()
schedule.append(n)
output_deps = list(graph[n][1])
for m in output_deps:
# Remove edge
graph[m][0].remove(n)
graph[n][1].remove(m)
            # If m now has no input deps, it becomes a candidate for evaluation
if len(graph[m][0]) == 0:
roots.append(m)
if any(len(i) for i, _ in graph.values()):
cycle = "\n".join("%s -> %s" % (f, i) for f, (i, _) in graph.items()
if f not in schedule)
raise RuntimeError("Field dependencies have a cycle:\n\n%s" % cycle)
return list(map(name2field.__getitem__, schedule))
| firedrakeproject/dcore | gusto/state.py | Python | mit | 26,831 | [
"NetCDF"
] | 531202d9194fc7899c42259819ec2fe953b001a816a02f302b4aacb26b8507b7 |
# proxy module
from __future__ import absolute_import
from mayavi.filters.mask_points import *
| enthought/etsproxy | enthought/mayavi/filters/mask_points.py | Python | bsd-3-clause | 95 | [
"Mayavi"
] | 158fe3c4f9cd1c64fdb2c2c327ea8ec5fa1303b4121ece6d1409b2b1e2e28b2e |
#!/usr/bin/env python
# By Jason Ladner
from __future__ import division
import sys, optparse, os, pysam, itertools
#Identifies individual reads covering 2 positions of interest and records the combined genotypes
#In version 2.0, starting saving read name info in order to handle R1 and R2 in a smarter way, but this could lead to high memory usage for large datasets
#If both reads cover positions of interest, and they agree about the genotypes, only one is counted
#If they disagree, neither is counted
def main():
#To parse command line
usage = "usage: %prog [options] sam1 [sam2 ...]"
p = optparse.OptionParser(usage)
p.add_option('-i', '--input', help='Text file with info for the position pairs to be examined. One line per pair, tab-delimited with the following columns: pos1, ref1, alt1, pos2, ref2, alt2 [None, OPT]')
p.add_option('-o', '--out', help='Name for output file, required if using the -i option. [None, OPT/REQ]')
# p.add_option('-s', '--sam', help='input sam file, or bam file. Should be sorted by query name [None, REQ]')
p.add_option('-q', '--minQual', default=20, type='int', help='Minimum base quals to be used in a geno [20]')
p.add_option('-m', '--mapQual', default=20, type='int', help='Minimum mapping quality for a read to be used [20]')
# p.add_option('-b', '--buffer', default=2, type='int', help='Number of bases to use as a buffer. If two insert locations are within this distance they are considered the same [2]')
p.add_option('-1', '--first', type='int', help='position in the reference of the left-most base to phase')
p.add_option('-2', '--second', type='int', help='position in the reference of the right-most base to phase')
p.add_option('--R1only', default = False, action = "store_true", help='Use this flag to only consider R1')
p.add_option('--R2only', default = False, action = "store_true", help='Use this flag to only consider R2')
opts, args = p.parse_args()
if opts.R1only and opts.R2only: print "--R1only and --R2only cannot be used together"
#Batch mode
elif opts.input:
fout = open(opts.out, 'w')
fout.write("File\tPosition 1\tPosition 2\tRef\Ref\tRef\Alt\tAlt\Ref\tAlt\Alt\n")
for each in args:
opts.sam = each
for line in open(opts.input, 'r'):
cols = line.strip().split("\t")
hap_tup_dict = phase(int(cols[0]), int(cols[3]), opts)
exp_geno_tups = make_exp_geno_tups(cols)
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%d\n" % (each, cols[0], cols[3], len(set(hap_tup_dict.get(exp_geno_tups[0], []))), len(set(hap_tup_dict.get(exp_geno_tups[1], []))), len(set(hap_tup_dict.get(exp_geno_tups[2], []))), len(set(hap_tup_dict.get(exp_geno_tups[3], [])))))
#Old way of just processing a single pair specified on the command line
else:
for each in args:
opts.sam = each
hap_tup_dict = phase(opts.first, opts.second, opts)
###-----------------End of main()--------------------------->>>
def make_exp_geno_tups(cols):
return [(cols[1],cols[4]),(cols[1],cols[5]),(cols[2],cols[4]),(cols[2],cols[5])]
def phase(pos1, pos2, opts):
hap_tup_dict={}
mutants={}
sam = pysam.Samfile(opts.sam)
#Step through each read in the sam file
for read in sam:
        #Check to make sure the read is mapped and meets the mapping quality threshold
if not read.is_unmapped and read.mapping_quality >= opts.mapQual:
#Read type checks
if (opts.R1only and read.is_read1) or (opts.R2only and read.is_read2) or (not opts.R1only and not opts.R2only):
#Reference start and end are 0-based, but end points to one past the last base
if read.reference_start >= read.reference_end: print "!!!!End is NOT larger than start: start=%d, end=%d" % (read.reference_start, read.reference_end)
if max(pos1-1, pos2-1) < read.reference_end and min(pos1-1, pos2-1) >= read.reference_start:
read_ref_dict, ref_read_dict = get_paired_base_dicts(read)
first_geno, first_quals = read_base_at_ref_pos(read, read_ref_dict, ref_read_dict, pos1-1)
second_geno, second_quals = read_base_at_ref_pos(read, read_ref_dict, ref_read_dict, pos2-1)
if min(first_quals) >= opts.minQual and min(second_quals) >= opts.minQual:
if (first_geno, second_geno) not in hap_tup_dict: hap_tup_dict[(first_geno, second_geno)] = [read.query_name]
else: hap_tup_dict[(first_geno, second_geno)].append(read.query_name)
hap_tup_dict = rmv_inconsistent_pairs(hap_tup_dict)
print opts.sam
for key, val in hap_tup_dict.iteritems():
print key, len(val), len(set(val))
return hap_tup_dict
###------------->>>
def rmv_inconsistent_pairs(hap_tup_dict):
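    # If the same read name was assigned two different genotype pairs (i.e.
    # R1 and R2 of a pair disagree), remove it from both lists so neither
    # haplotype call is counted.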
for a,b in itertools.combinations(hap_tup_dict.keys(), 2):
ovlp = set(hap_tup_dict[a]).intersection(set(hap_tup_dict[b]))
if ovlp:
for each in ovlp:
print each, a, b
hap_tup_dict[a].remove(each)
hap_tup_dict[b].remove(each)
return hap_tup_dict
def get_paired_base_dicts(read):
aligned_base_tuples = read.get_aligned_pairs()
read_ref_dict=dict(aligned_base_tuples)
# print read.get_aligned_pairs(with_seq=True)
ref_read_dict=dict((y, x) for x, y in aligned_base_tuples)
return read_ref_dict, ref_read_dict
def read_base_at_ref_pos(read, read_ref_dict, ref_read_dict, ref_pos):
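    # Return the read base(s) aligned to ref_pos together with their base
    # qualities. Bases inserted immediately before the site (read positions
    # with no reference coordinate) are prepended, so short insertions become
    # part of the genotype string; a deletion is reported as '-' with a
    # sentinel quality of 1000 so it always passes the quality filter.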
quals=[]
read_pos = ref_read_dict[ref_pos]
    if read_pos is None: return '-', [1000]  # no aligned read base here (deletion in the read)
else:
geno=read.query_sequence[read_pos]
quals.append(read.query_qualities[read_pos])
n=1
        while read_ref_dict[read_pos-n] is None:  # walk back through inserted bases with no reference coordinate
geno = read.query_sequence[read_pos-n] + geno
            quals.append(read.query_qualities[read_pos-n])
n+=1
return geno, quals
if __name__ == "__main__":
main()
| jtladner/Scripts | intraread_phasing/intraread_phasing_2.0.py | Python | gpl-3.0 | 6,122 | [
"pysam"
] | 20022008ed90a953001a6099c27e984f74c7997a91e7a23c22b3293844695f16 |
from pprint import pprint
# import pytest
from trackma.extras import AnimeInfoExtractor
DEFAULTS = {
'resolution': '',
'hash': '',
'subberTag': '',
'videoType': [],
'audioType': [],
'releaseSource': [],
'extension': '',
'episodeStart': None,
'episodeEnd': None,
'volumeStart': None,
'volumeEnd': None,
'version': 1,
'name': '',
'pv': -1,
'season': None,
}
def _assert_aie(filename, **assertions):
"""Helper for asserting AnimeInfoExtractor results.
Accepts a dict of assertions and asserts everything not provided as unchanged.
"""
aie = AnimeInfoExtractor(filename)
    pprint(vars(aie))  # print details for quicker debugging on failure
for key, default in DEFAULTS.items():
expected = assertions.get(key, default)
assert getattr(aie, key) == expected
return aie
def test_horriblesubs():
filename = "[HorribleSubs] Nobunaga-sensei no Osanazuma - 04 [720p].mkv"
aie = _assert_aie(
filename,
name="Nobunaga-sensei no Osanazuma",
episodeStart=4,
subberTag="HorribleSubs",
extension="mkv",
resolution="720p",
)
# check these only once
assert aie.originalFilename == filename
assert aie.getName() == "Nobunaga-sensei no Osanazuma"
assert aie.getEpisode() == 4
def test_compound_subber_tag_and_wierd_epnum():
_assert_aie(
"[VCB-Studio+Commie] Sword Art Online II [03].mkv",
name="Sword Art Online II",
episodeStart=3,
subberTag="VCB-Studio+Commie",
extension="mkv",
)
def test_late_subber_tag_with_hash_and_commas():
_assert_aie(
"Chio-chan no Tsuugakuro - 04 [HorribleSubs] [www, 720p, AAC] [5D4D1205].mkv",
name="Chio-chan no Tsuugakuro",
episodeStart=4,
subberTag="HorribleSubs",
extension="mkv",
resolution="720p",
audioType=["AAC"],
hash="5D4D1205",
releaseSource=["www"],
)
def test_dubsub():
_assert_aie(
"Arifureta E01v1 [1080p+][AAC][JapDub][GerSub][Web-DL].mkv",
name="Arifureta",
episodeStart=1,
subberTag="JapDub",
extension="mkv",
resolution="1080p",
audioType=["AAC"],
releaseSource=["Web-DL"],
)
def test_name_with_year():
_assert_aie(
"[TestTag] Bungou Stray Dogs (2019) - 06 [496D45BB].mkv",
name="Bungou Stray Dogs (2019)",
episodeStart=6,
subberTag="TestTag",
extension="mkv",
hash="496D45BB",
)
def test_name_with_year_and_extra_brackets():
_assert_aie(
"[Erai-raws] Fairy Tail (2018) - 45 [1080p][Multiple Subtitle].mkv",
name="Fairy Tail (2018)",
episodeStart=45,
subberTag="Erai-raws",
extension="mkv",
resolution="1080p",
)
def test_eac3():
_assert_aie(
"[PAS] Houseki no Kuni - 05 [WEB 720p E-AC-3] [F671AE53].mkv",
name="Houseki no Kuni",
episodeStart=5,
subberTag="PAS",
extension="mkv",
resolution="720p",
audioType=["E-AC-3"],
releaseSource=["WEB"],
hash="F671AE53",
)
def test_with_number_in_episode_title():
_assert_aie(
"[Opportunity] The Tatami Galaxy 10 - The 4.5-Tatami Idealogue [BD 720p] [FF757616].mkv",
name="The Tatami Galaxy",
episodeStart=10,
subberTag="Opportunity",
extension="mkv",
resolution="720p",
releaseSource=["BD"],
hash="FF757616",
)
def test_with_standalone_number_in_episode_title():
_assert_aie(
"Monogatari - S02E01 - Karen Bee - Part 2.mkv",
name="Monogatari 2",
season=2,
episodeStart=1,
extension="mkv",
)
def test_sXXeXX_and_sdtv():
_assert_aie(
"Clannad - S02E01 - A Farewell to the End of Summer SDTV.mkv",
name="Clannad 2",
season=2,
episodeStart=1,
extension="mkv",
resolution="SD",
releaseSource=["TV"],
)
def test_sXXeXX_and_trailing_hyphen():
_assert_aie(
"ReZERO -Starting Life in Another World- S02E06 [1080p][E-AC3].mkv",
name="ReZERO -Starting Life in Another World- 2",
season=2,
episodeStart=6,
extension="mkv",
resolution="1080p",
audioType=["E-AC3"],
)
def test_with_brackets():
_assert_aie(
"[HorribleSubs] Nakanohito Genome [Jikkyouchuu] - 01 [1080p].mkv",
name="Nakanohito Genome", # ' [Jikkyouchuu]' is stripped currently
episodeStart=1,
subberTag="HorribleSubs",
extension="mkv",
resolution="1080p",
)
def test_with_dots():
_assert_aie(
"Kill.la.Kill.S01E01.1080p-Hi10p.BluRay.FLAC2.0.x264-CTR.[98AA9B1C].mkv",
name="Kill la Kill",
season=1,
episodeStart=1,
extension="mkv",
resolution="1080p",
# releaseSource=["BluRay"],
# audioType=["FLAC"],
videoType=["H264", "Hi10P"],
hash="98AA9B1C",
)
def test_unusual_subber():
_assert_aie(
"[-__-'] Girls und Panzer OVA 6 [BD 1080p FLAC] [B13C83A0].mkv",
name="Girls und Panzer OVA",
episodeStart=6,
subberTag="-__-'",
extension="mkv",
resolution="1080p",
releaseSource=["BD"],
audioType=["FLAC"],
hash="B13C83A0",
)
def test_unusual_subber_and_no_epnum():
_assert_aie(
"[-__-'] Girls und Panzer OVA Anzio-sen [BD 1080p FLAC] [231FDA45].mkv",
name="Girls und Panzer OVA Anzio-sen",
subberTag="-__-'",
extension="mkv",
resolution="1080p",
releaseSource=["BD"],
audioType=["FLAC"],
hash="231FDA45",
)
def test_nothing_in_particular():
_assert_aie(
"[Underwater-FFF] Saki Zenkoku-hen - The Nationals - 01 [BD][1080p-FLAC][81722FD7].mkv",
name="Saki Zenkoku-hen - The Nationals",
episodeStart=1,
subberTag="Underwater-FFF",
extension="mkv",
resolution="1080p",
releaseSource=["BD"],
audioType=["FLAC"],
hash="81722FD7",
)
def test_hi444pp_profile():
_assert_aie(
"[Erai-raws] Goblin Slayer - Goblin's Crown [BD][1080p YUV444P10][FLAC][Multiple Subtitle].mkv",
name="Goblin Slayer - Goblin's Crown",
subberTag="Erai-raws",
extension="mkv",
resolution="1080p",
releaseSource=["BD"],
audioType=["FLAC"],
videoType=["H264", "Hi444PP"],
)
def test_jpbd_lpcm():
_assert_aie(
"[Koten_Gars] Kiddy Grade - Movie I [JP.BD][Hi10][1080p][LPCM] [2FAAB41B].mkv",
name="Kiddy Grade - Movie I",
subberTag="Koten_Gars",
extension="mkv",
resolution="1080p",
releaseSource=["BD"],
audioType=["LPCM"],
videoType=["H264", "Hi10P"],
hash="2FAAB41B"
)
def test_underscores():
_assert_aie(
"[No]Touhou_Gensou_Mangekyou_-_01_(Hi10P)[26D7A2B3].mkv",
name="Touhou Gensou Mangekyou",
episodeStart=1,
subberTag="No",
extension="mkv",
videoType=["H264", "Hi10P"],
hash="26D7A2B3"
)
def test_literal_ep():
_assert_aie(
"Uzaki-chan wa Asobitai! Ep 2.mkv",
name="Uzaki-chan wa Asobitai!",
episodeStart=2,
extension="mkv",
)
| z411/trackma | tests/test_anime_info_extractor.py | Python | gpl-3.0 | 7,402 | [
"Galaxy"
] | 2ebb4461ab375ebbfaf261e25ac60d2a9e5bea39746550ba2c55e2c04c604536 |
''' Tests for pyleoclim.core.ui.MultipleSeries
Naming rules:
1. class: Test{filename}{Class}{method} with appropriate camel case
2. function: test_{method}_t{test_id}
Notes on how to test:
0. Make sure [pytest](https://docs.pytest.org) has been installed: `pip install pytest`
1. execute `pytest {directory_path}` in terminal to perform all tests in all testing files inside the specified directory
2. execute `pytest {file_path}` in terminal to perform all tests in the specified file
3. execute `pytest {file_path}::{TestClass}::{test_method}` in terminal to perform a specific test class/method inside the specified file
4. after `pip install pytest-xdist`, one may execute "pytest -n 4" to test in parallel with number of workers specified by `-n`
5. for more details, see https://docs.pytest.org/en/stable/usage.html
'''
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
import pytest
from urllib.request import urlopen
import json
import pyleoclim as pyleo
from pyleoclim.utils.tsmodel import (
ar1_sim,
colored_noise,
)
from pyleoclim.utils.decomposition import mcpca
# a collection of useful functions
def gen_normal(loc=0, scale=1, nt=100):
''' Generate random data with a Gaussian distribution
'''
t = np.arange(nt)
v = np.random.normal(loc=loc, scale=scale, size=nt)
return t, v
def gen_colored_noise(alpha=1, nt=100, f0=None, m=None, seed=None):
''' Generate colored noise
'''
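    # alpha is the spectral slope of the generated noise (power ~ f**-alpha),
    # so alpha=1 gives pink noise.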
t = np.arange(nt)
v = colored_noise(alpha=alpha, t=t, f0=f0, m=m, seed=seed)
return t, v
def load_data():
    #Loads the Stott MD982176 record
try:
d = pyleo.Lipd(usr_path='http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004')
except:
d = pyleo.Lipd('./example_data/MD982176.Stott.2004.lpd')
return d
# Tests below
class TestUIMultipleSeriesDetrend():
@pytest.mark.parametrize('detrend_method',['linear','constant','savitzky-golay','emd'])
def test_detrend_t1(self, detrend_method):
alpha=1
t, v = gen_colored_noise(nt=550, alpha=alpha)
#Trends
slope = 1e-5
slope1= 2e-5
intercept = -1
nonlinear_trend = slope*t**2 + intercept
nonlinear_trend1 = slope1*t**2 + intercept
v_trend = v + nonlinear_trend
v_trend1 = v + nonlinear_trend1
#create series object
ts=pyleo.Series(time=t,value=v_trend)
ts1=pyleo.Series(time=t,value=v_trend1)
# Create a multiple series object
ts_all= pyleo.MultipleSeries([ts,ts1])
ts_detrend=ts_all.detrend(method=detrend_method)
class TestMultipleSeriesPlot:
'''Test for MultipleSeries.plot()
MultipleSeries.plot outputs a matplotlib figure and axis object with two datasets,
so we will compare the time axis of the axis object to the time arrays we generate,
and the value axis with the value arrays we generate'''
def test_plot(self):
#Generate time and value arrays
t_0, v_0 = gen_normal()
t_1, v_1 = gen_normal()
#Create series objects
ts_0 = pyleo.Series(time = t_0, value = v_0)
ts_1 = pyleo.Series(time = t_1, value = v_1)
#Create a list of series objects
serieslist = [ts_0, ts_1]
#Turn this list into a multiple series object
ts_M = pyleo.MultipleSeries(serieslist)
fig, ax = ts_M.plot()
lines_0 = ax.lines[0]
lines_1 = ax.lines[1]
x_plot_0 = lines_0.get_xdata()
y_plot_0 = lines_0.get_ydata()
x_plot_1 = lines_1.get_xdata()
y_plot_1 = lines_1.get_ydata()
assert_array_equal(t_0, x_plot_0)
assert_array_equal(t_1, x_plot_1)
assert_array_equal(v_0, y_plot_0)
assert_array_equal(v_1, y_plot_1)
class TestMultipleSeriesStandardize:
'''Test for MultipleSeries.standardize()
Standardize normalizes the multiple series object, so we'll simply test maximum and minimum values,
only now we are running the test on series in a MultipleSeries object'''
def test_standardize(self):
t_0, v_0 = gen_colored_noise()
t_1, v_1 = gen_colored_noise()
ts_0 = pyleo.Series(time = t_0, value = v_0)
ts_1 = pyleo.Series(time = t_1, value = v_1)
serieslist = [ts_0, ts_1]
ts_M = pyleo.MultipleSeries(serieslist)
ts_M_std = ts_M.standardize()
x_axis_0 = ts_M_std.series_list[0].__dict__['time']
x_axis_1 = ts_M_std.series_list[1].__dict__['time']
y_axis_0 = ts_M_std.series_list[0].__dict__['value']
y_axis_1 = ts_M_std.series_list[1].__dict__['value']
assert_array_equal(x_axis_0, t_0)
assert_array_equal(x_axis_1, t_1)
assert max(v_0) > max(y_axis_0)
assert max(v_1) > max(y_axis_1)
class TestMultipleSeriesBin:
'''Test for MultipleSeries.bin()
Testing if the bin function will place the series on the same time axis
'''
def test_bin(self):
t_0, v_0 = gen_colored_noise()
t_1, v_1 = gen_colored_noise()
ts_0 = pyleo.Series(time = t_0, value = v_0)
ts_1 = pyleo.Series(time = t_1, value = v_1)
serieslist = [ts_0, ts_1]
ts_M = pyleo.MultipleSeries(serieslist)
ts_M_bin = ts_M.bin()
x_axis_0 = ts_M_bin.series_list[0].__dict__['time']
x_axis_1 = ts_M_bin.series_list[1].__dict__['time']
assert_array_equal(x_axis_0, x_axis_1)
class TestMultipleSeriesInterp:
'''Test for MultipleSeries.interp()
Testing if the interp function will place the series on the same time axis
'''
def test_interp(self):
t_0, v_0 = gen_colored_noise()
t_1, v_1 = gen_colored_noise()
ts_0 = pyleo.Series(time = t_0, value = v_0)
ts_1 = pyleo.Series(time = t_1, value = v_1)
serieslist = [ts_0, ts_1]
ts_M = pyleo.MultipleSeries(serieslist)
ts_M_interp = ts_M.interp()
x_axis_0 = ts_M_interp.series_list[0].__dict__['time']
x_axis_1 = ts_M_interp.series_list[1].__dict__['time']
assert_array_equal(x_axis_0, x_axis_1)
class TestMultipleSeriesGkernel:
'''Test for MultipleSeries.gkernel()
Testing the gkernel function will place the series on the same time axis
'''
def test_gkernel(self):
t_0, v_0 = gen_colored_noise()
t_1, v_1 = gen_colored_noise()
ts_0 = pyleo.Series(time = t_0, value = v_0)
ts_1 = pyleo.Series(time = t_1, value = v_1)
serieslist = [ts_0, ts_1]
ts_M = pyleo.MultipleSeries(serieslist)
ts_M_gkernel = ts_M.gkernel()
x_axis_0 = ts_M_gkernel.series_list[0].__dict__['time']
x_axis_1 = ts_M_gkernel.series_list[1].__dict__['time']
assert_array_equal(x_axis_0, x_axis_1)
class TestMultipleSeriesPca:
'''Tests for MultipleSeries.pca()
Testing the PCA function
'''
def test_pca_t0(self):
'''
        Test with synthetic data, no missing values
Returns
-------
None.
'''
p = 10; n = 100
signal = pyleo.gen_ts(model='colored_noise',nt=n,alpha=1.0).standardize()
X = signal.value[:,None] + np.random.randn(n,p)
t = np.arange(n)
mslist = []
for i in range(p):
mslist.append(pyleo.Series(time = t, value = X[:,i]))
ms = pyleo.MultipleSeries(mslist)
res = ms.pca()
# check that all variance was recovered
assert abs(res.pctvar.sum() - 100)<0.1
def test_pca_t1(self):
'''
Test with synthetic data, with missing values
'''
p = 10; n = 100
signal = pyleo.gen_ts(model='colored_noise',nt=n,alpha=1.0).standardize()
X = signal.value[:,None] + np.random.randn(n,p)
t = np.arange(n)
# poke some holes at random in the array
Xflat = X.flatten()
Xflat[np.random.randint(n*p, size=p-1)]=np.nan # note: at most ncomp missing vals
X = np.reshape(Xflat, (n,p))
#X[-1,0] = np.nan
mslist = []
for i in range(p):
mslist.append(pyleo.Series(time = t, value = X[:,i],clean_ts=False))
ms = pyleo.MultipleSeries(mslist)
res = ms.pca(ncomp=4,gls=True)
fig, ax = res.screeplot(mute=True)
def test_pca_t2(self):
'''
Test with real data, same time axis
'''
d=load_data()
tslist = d.to_LipdSeriesList()
tslist = tslist[2:]
ms = pyleo.MultipleSeries(tslist)
msl = ms.common_time() # put on common time
res = msl.pca()
res.screeplot(mute=True)
res.modeplot(mute=True)
def test_pca_t3(self):
'''
Test with synthetic data, no missing values, kwargs
Returns
-------
None.
'''
p = 10; n = 100
signal = pyleo.gen_ts(model='colored_noise',nt=n,alpha=1.0)
X = signal.value[:,None] + np.random.randn(n,p)
t = np.arange(n)
mslist = []
for i in range(p):
mslist.append(pyleo.Series(time = t, value = X[:,i]))
ms = pyleo.MultipleSeries(mslist)
res = ms.pca(method='eig',standardize=True,demean=False,normalize=True)
# check that all variance was recovered
assert abs(res.pctvar.sum() - 100)<0.001
class TestMultipleSeriesGridProperties:
'''Test for MultipleSeries.grid_properties()
'''
@pytest.mark.parametrize('step_style', ['min', 'max', 'mean', 'median'])
def test_grid_properties(self, step_style):
p = 10; n = 100
signal = pyleo.gen_ts(model='colored_noise',nt=n,alpha=1.0).standardize()
X = signal.value[:,None] + np.random.randn(n,p)
t = np.arange(n)
mslist = []
for i in range(p):
mslist.append(pyleo.Series(time = t, value = X[:,i]))
ms = pyleo.MultipleSeries(mslist)
gp = ms.grid_properties(step_style=step_style)
assert (gp[0,:] == np.array((t.min(), t.max(), 1.))).all()
# class TestMultipleSeriesMcPca:
# '''Test for MultipleSeries.mcpca()
# Testing the MC-PCA function
# '''
# def test_mcpca_t0(self):
# url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004'
# data = pyleo.Lipd(usr_path = url)
# tslist = data.to_LipdSeriesList()
# tslist = tslist[2:] # drop the first two series which only concerns age and depth
# ms = pyleo.MultipleSeries(tslist)
# # TO DO !!!!
# # msc = ms.common_time()
# # res = msc.pca(nMC=20)
class TestMultipleSeriesCommonTime:
'''Test for MultipleSeries.common_time()
'''
@pytest.mark.parametrize('method', ['bin', 'interp', 'gkernel'])
def test_common_time_t0(self, method):
t_0, v_0 = gen_colored_noise()
t_1, v_1 = gen_colored_noise()
ts_0 = pyleo.Series(time = t_0, value = v_0)
ts_1 = pyleo.Series(time = t_1, value = v_1)
serieslist = [ts_0, ts_1]
ts_M = pyleo.MultipleSeries(serieslist)
ts_M_ct = ts_M.common_time(method=method)
x_axis_0 = ts_M_ct.series_list[0].time
x_axis_1 = ts_M_ct.series_list[1].time
assert_array_equal(x_axis_0, x_axis_1)
def test_common_time_t1(self):
time = np.arange(1900, 2020, step=1/12)
ndel = 200
seriesList = []
for j in range(4):
v = pyleo.gen_ts(model='colored_noise',alpha=1, t=time)
deleted_idx = np.random.choice(range(np.size(time)), ndel, replace=False)
tu = np.delete(time.copy(), deleted_idx)
vu = np.delete(v.value, deleted_idx)
ts = pyleo.Series(time=tu, value=vu, value_name='Series_'+str(j+1))
seriesList.append(ts)
ms = pyleo.MultipleSeries(seriesList)
ms1 = ms.common_time(method='interp', start=1910, stop=2010, step=1/12)
assert (np.diff(ms1.series_list[0].time)[0] - 1/12) < 1e-3
class TestMultipleSeriesStackPlot():
''' Test for MultipleSeries.Stackplot
'''
@pytest.mark.parametrize('labels', [None, 'auto', ['sst','d18Osw']])
def test_StackPlot_t0(self, labels):
d=load_data()
sst = d.to_LipdSeries(number=5)
d18Osw = d.to_LipdSeries(number=3)
ms = pyleo.MultipleSeries([sst,d18Osw])
ms.stackplot(labels=labels, mute=True)
@pytest.mark.parametrize('plot_kwargs', [{'marker':'o'},[{'marker':'o'},{'marker':'^'}]])
def test_StackPlot_t1(self, plot_kwargs):
d=load_data()
sst = d.to_LipdSeries(number=5)
d18Osw = d.to_LipdSeries(number=3)
ms = pyleo.MultipleSeries([sst,d18Osw])
ms.stackplot(plot_kwargs=plot_kwargs, mute=True)
class TestMultipleSeriesSpectral():
''' Test for MultipleSeries.spectral
'''
def test_spectral_t0(self):
'''Test the spectral function with pre-generated scalogram objects
'''
d=load_data()
sst = d.to_LipdSeries(number=5)
d18Osw = d.to_LipdSeries(number=3)
ms = pyleo.MultipleSeries([sst,d18Osw])
scals = ms.wavelet()
psds = ms.spectral(method='wwz',scalogram_list=scals)
| LinkedEarth/Pyleoclim_util | pyleoclim/tests/test_ui_MultipleSeries.py | Python | gpl-3.0 | 13,572 | [
"Gaussian"
] | 80bd15cf1e383013a47dc0ea662dd536436588e7a5cc8ddd87ea8b63acf24d27 |
"""
test_frost_number_bmi.py
tests of the frost_number component of permamodel using bmi API
"""
import datetime
import os
import numpy as np
import pytest
from dateutil.relativedelta import relativedelta
from pytest import approx
from permamodel.components import bmi_frost_number_Geo, frost_number_Geo, perma_base
from .. import examples_directory
# Set the file names for the example cfg files
config_filename = os.path.join(examples_directory, "FrostnumberGeo_Default.cfg")
# List of files to be removed after testing is complete
# use files_to_remove.append(<filename>) to add to it
files_to_remove = []
def setup_module():
""" Standard fixture called before any tests in this file are performed """
pass
def teardown_module():
""" Standard fixture called after all tests in this file are performed """
# If need to remove files that are created:
# for f in files_to_remove:
# if os.path.exists(f):
# os.remove(f)
pass
# ---------------------------------------------------
# Tests that ensure we have bmi functionality
# Note: the netcdf functions seem to lead to errors if the netcdf
# files are not closed. This is done in the finalize() routine
# which should therefore be called after every test
# ---------------------------------------------------
def test_frost_number_Geo_has_initialize(tmpdir):
# Can we call an initialize function?
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize(cfg_file=config_filename)
fng.finalize() # Must have this or get IOError later
def test_frost_number_initialize_sets_year(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize(cfg_file=config_filename)
# Assert the values from the cfg file
assert fng._model._date_current.year == 1901
fng.finalize() # Must have this or get IOError later
def test_frost_number_initialize_sets_air_min_and_max(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize(cfg_file=config_filename)
# The temperature arrays are initialized to all NaNs
nan_array = np.zeros((3, 2), dtype=np.float32)
nan_array.fill(np.nan)
assert np.all(np.isnan(fng._model.T_air_min)) # == nan_array)
fng.finalize() # Must have this or get IOError later
def test_frost_number_update_increments_time(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
fng.update()
assert fng._model._date_current == fng._model._start_date + relativedelta(
years=fng._model._timestep_duration
)
fng.finalize() # Must have this or get IOError later
def test_frost_number_update_changes_air_frost_number(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
afn0 = fng._model.air_frost_number_Geo.copy()
fng.update()
afn1 = fng._model.air_frost_number_Geo.copy()
assert np.any(afn0 != afn1)
fng.update_until(1.0)
afn2 = fng._model.air_frost_number_Geo.copy()
assert np.all(afn1 == afn2)
fng.finalize() # Must have this or get IOError later
def test_frost_number_get_current_time_returns_scalar(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
current_time = fng.get_current_time()
assert isinstance(current_time, (float, int))
fng.finalize() # Must have this or get IOError later
def test_frost_number_get_end_time_returns_scalar(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
end_time = fng.get_end_time()
assert isinstance(end_time, float) or isinstance(end_time, int)
fng.finalize() # Must have this or get IOError later
def test_frost_number_implements_update_until(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
fng.update_until(fng.get_end_time())
assert fng._model._date_current == fng._model._end_date
fng.finalize() # Must have this or get IOError later
def test_FNGeo_computes_default_values(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
for n in range(5):
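            # The air frost number follows F = sqrt(DDF)/(sqrt(DDF)+sqrt(DDT))
            # (freezing vs. thawing degree-days), so the asserts below expect
            # 0.5 when Tmin+Tmax == 0, 1.0 when the cell never thaws, and 0.0
            # when it never freezes.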
if (fng._model.T_air_min[0, 0] + fng._model.T_air_max[0, 0]) == 0:
assert fng._values["frostnumber__air"][0, 0] == 0.5
if (fng._model.T_air_min[0, 0] <= 0.0) and (
fng._model.T_air_max[0, 0] <= 0.0
):
assert fng._values["frostnumber__air"][0, 0] == 1.0
if (fng._model.T_air_min[0, 0] > 0.0) and (
fng._model.T_air_max[0, 0] > 0.0
):
assert fng._values["frostnumber__air"][0, 0] == 0.0
# print("FNGeo frostnumber__air: %d" % n)
# print(fng._model.T_air_min)
# print(fng._model.T_air_max)
# print(fng._values['frostnumber__air'])
fng.update()
fng.finalize() # Must have this or get IOError later
def test_FNGeo_get_attribute(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
# Check an attribute that exists
this_att = fng.get_attribute("time_units")
assert this_att == "years"
# Check an attribute that doesn't exist
with pytest.raises(KeyError):
fng.get_attribute("not_an_attribute")
fng.finalize() # Must have this or get IOError later
def test_FNGeo_get_input_var_names(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
assert "atmosphere_bottom_air__temperature" in fng.get_input_var_names()
fng.finalize() # Must have this or get IOError later
def test_FNGeo_get_output_var_names(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
assert "frostnumber__air" in fng.get_output_var_names()
fng.finalize() # Must have this or get IOError later
def test_FNGeo_get_set_value(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
airtempval = fng.get_value("atmosphere_bottom_air__temperature")
airtempnew = 123 * np.ones_like(airtempval)
fng.set_value("atmosphere_bottom_air__temperature", airtempnew)
assert not np.all(airtempval == approx(airtempnew))
fng.finalize() # Must have this or get IOError later
def test_FNGeo_get_grid_shape(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
airtemp_gridid = fng.get_var_grid("atmosphere_bottom_air__temperature")
assert (3, 2) == fng.get_grid_shape(airtemp_gridid)
fng.finalize() # Must have this or get IOError later
def test_FNGeo_get_grid_size(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
airtemp_gridid = fng.get_var_grid("atmosphere_bottom_air__temperature")
assert fng.get_grid_size(airtemp_gridid) == 6
fng.finalize() # Must have this or get IOError later
def test_FNGeo_get_grid_spacing(tmpdir):
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
airtemp_gridid = fng.get_var_grid("atmosphere_bottom_air__temperature")
assert np.all(np.array([1.0, 1.0]) == fng.get_grid_spacing(airtemp_gridid))
fng.finalize() # Must have this or get IOError later
def test_FNGeo_jan_and_jul_temperatures_are_grids(tmpdir):
""" Test that FNGeo BMI has input variables for jan and jul data """
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
airtemp_gridid = fng.get_var_grid("atmosphere_bottom_air__temperature")
jan_airtemp_gridid = fng.get_var_grid(
"atmosphere_bottom_air__temperature_mean_jan"
)
assert jan_airtemp_gridid is not None
jul_airtemp_gridid = fng.get_var_grid(
"atmosphere_bottom_air__temperature_mean_jul"
)
assert jul_airtemp_gridid is not None
fng.finalize() # Must have this or get IOError later
def test_FNGeo_can_set_current_and_jan_temperatures(tmpdir):
""" Test that FNGeo BMI can set jan temperature field """
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
airtemp_values = fng.get_value("atmosphere_bottom_air__temperature")
airtemp_values = np.ones_like(airtemp_values)
fng.set_value("atmosphere_bottom_air__temperature", airtemp_values)
jan_airtemp_values = fng.get_value(
"atmosphere_bottom_air__temperature_mean_jan"
)
jan_airtemp_values = np.ones_like(jan_airtemp_values)
fng.set_value("atmosphere_bottom_air__temperature_mean_jan", jan_airtemp_values)
assert np.all(
fng.get_value("atmosphere_bottom_air__temperature")
== fng.get_value("atmosphere_bottom_air__temperature_mean_jan")
)
fng.finalize() # Must have this or get IOError later
def test_FNGeo_update_zero_fraction_does_not_change_time(tmpdir):
""" Test that running update_frac(0) does not change the time """
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
fng.initialize()
init_time = fng.get_current_time()
fng.update_frac(0)
plus_zero_time = fng.get_current_time()
assert init_time == plus_zero_time
fng.update()
one_update_time = fng.get_current_time()
assert not np.all(init_time == one_update_time)
fng.finalize() # Must have this or get IOError later
def test_FNGeo_simulate_WMT_run(tmpdir):
""" Test that we can set values as if running in WMT """
with tmpdir.as_cwd():
fng = bmi_frost_number_Geo.BmiFrostnumberGeoMethod()
wmt_cfg_file = os.path.join(examples_directory, "FNGeo_WMT_testing.cfg")
fng.initialize(wmt_cfg_file)
assert fng._name == "Permamodel FrostnumberGeo Component"
# Until set, e.g. by WMT with cru values, all vals are NaN
# Note: these are actually setting references to the underlying
# model arrays!
airtemp_values = fng.get_value("atmosphere_bottom_air__temperature")
jan_airtemp_values = fng.get_value(
"atmosphere_bottom_air__temperature_mean_jan"
)
jul_airtemp_values = fng.get_value(
"atmosphere_bottom_air__temperature_mean_jul"
)
# In WMT mode, must set monthly temperature values, then run update_frac(0)
# to get valid values in frost number array
# use January as 'coldest'
# use July as 'warmest'
airtemps_of_one = np.ones_like(airtemp_values)
jan_airtemp_values[:] = -10.0 * airtemps_of_one
jul_airtemp_values[:] = 10.0 * airtemps_of_one
fng.set_value("atmosphere_bottom_air__temperature_mean_jan", jan_airtemp_values)
fng.set_value("atmosphere_bottom_air__temperature_mean_jul", jul_airtemp_values)
fng.update_frac(0)
airfn_values = fng.get_value("frostnumber__air")
assert np.all(0.5 * airtemps_of_one == airfn_values)
fng.finalize() # Must have this or get IOError later
| permamodel/permamodel | permamodel/tests/test_frost_number_Geo_bmi.py | Python | mit | 11,868 | [
"NetCDF"
] | 57004a227c157d335e2348750812093ece6243b77a132dfaaf46150ab946e6bd |
################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `gamma` module.
"""
import warnings
warnings.simplefilter("error")
import numpy as np
from scipy import special
from numpy import testing
from .. import gaussian
from bayespy.nodes import (Gaussian,
GaussianARD,
GaussianGammaISO,
Gamma,
Wishart)
from ...vmp import VB
from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestGammaGradient(TestCase):
"""Numerically check Riemannian gradient of several nodes.
    A VB-EM update takes a unit-length step in the direction of the
    Riemannian gradient. Thus, the change caused by a single VB-EM
    update and the Riemannian gradient should be equal.
"""
def test_riemannian_gradient(self):
"""Test Riemannian gradient of a Gamma node."""
#
# Without observations
#
# Construct model
a = np.random.rand()
b = np.random.rand()
tau = Gamma(a, b)
# Random initialization
tau.initialize_from_parameters(np.random.rand(),
np.random.rand())
# Initial parameters
phi0 = tau.phi
# Gradient
g = tau.get_riemannian_gradient()
# Parameters after VB-EM update
tau.update()
phi1 = tau.phi
# Check
self.assertAllClose(g[0],
phi1[0] - phi0[0])
self.assertAllClose(g[1],
phi1[1] - phi0[1])
#
# With observations
#
# Construct model
a = np.random.rand()
b = np.random.rand()
tau = Gamma(a, b)
mu = np.random.randn()
Y = GaussianARD(mu, tau)
Y.observe(np.random.randn())
# Random initialization
tau.initialize_from_parameters(np.random.rand(),
np.random.rand())
# Initial parameters
phi0 = tau.phi
# Gradient
g = tau.get_riemannian_gradient()
# Parameters after VB-EM update
tau.update()
phi1 = tau.phi
# Check
self.assertAllClose(g[0],
phi1[0] - phi0[0])
self.assertAllClose(g[1],
phi1[1] - phi0[1])
pass
def test_gradient(self):
"""Test standard gradient of a Gamma node."""
D = 3
np.random.seed(42)
#
# Without observations
#
# Construct model
a = np.random.rand(D)
b = np.random.rand(D)
tau = Gamma(a, b)
Q = VB(tau)
# Random initialization
tau.initialize_from_parameters(np.random.rand(D),
np.random.rand(D))
# Initial parameters
phi0 = tau.phi
# Gradient
rg = tau.get_riemannian_gradient()
g = tau.get_gradient(rg)
# Numerical gradient
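        # Forward-difference check: g_num[k][i] ~ (L(p0 + eps*e_i) - L(p0)) / eps,
        # where L is the VB lower bound from Q.compute_lowerbound() and e_i
        # perturbs the i:th element of the k:th parameter array.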
eps = 1e-8
p0 = tau.get_parameters()
l0 = Q.compute_lowerbound(ignore_masked=False)
g_num = [np.zeros(D), np.zeros(D)]
for i in range(D):
e = np.zeros(D)
e[i] = eps
p1 = p0[0] + e
tau.set_parameters([p1, p0[1]])
l1 = Q.compute_lowerbound(ignore_masked=False)
g_num[0][i] = (l1 - l0) / eps
for i in range(D):
e = np.zeros(D)
e[i] = eps
p1 = p0[1] + e
tau.set_parameters([p0[0], p1])
l1 = Q.compute_lowerbound(ignore_masked=False)
g_num[1][i] = (l1 - l0) / eps
# Check
self.assertAllClose(g[0],
g_num[0])
self.assertAllClose(g[1],
g_num[1])
#
# With observations
#
# Construct model
a = np.random.rand(D)
b = np.random.rand(D)
tau = Gamma(a, b)
mu = np.random.randn(D)
Y = GaussianARD(mu, tau)
Y.observe(np.random.randn(D))
Q = VB(Y, tau)
# Random initialization
tau.initialize_from_parameters(np.random.rand(D),
np.random.rand(D))
# Initial parameters
phi0 = tau.phi
# Gradient
rg = tau.get_riemannian_gradient()
g = tau.get_gradient(rg)
# Numerical gradient
eps = 1e-8
p0 = tau.get_parameters()
l0 = Q.compute_lowerbound(ignore_masked=False)
g_num = [np.zeros(D), np.zeros(D)]
for i in range(D):
e = np.zeros(D)
e[i] = eps
p1 = p0[0] + e
tau.set_parameters([p1, p0[1]])
l1 = Q.compute_lowerbound(ignore_masked=False)
g_num[0][i] = (l1 - l0) / eps
for i in range(D):
e = np.zeros(D)
e[i] = eps
p1 = p0[1] + e
tau.set_parameters([p0[0], p1])
l1 = Q.compute_lowerbound(ignore_masked=False)
g_num[1][i] = (l1 - l0) / eps
# Check
self.assertAllClose(g[0],
g_num[0])
self.assertAllClose(g[1],
g_num[1])
pass
| SalemAmeen/bayespy | bayespy/inference/vmp/nodes/tests/test_gamma.py | Python | mit | 5,650 | [
"Gaussian"
] | 8e94e6c621b8426985ec58163fb31313abc22e8411d42e2ca70e994544c088e1 |
import lie_group_diffeo as lgd
import odl
import numpy as np
action_type = 'geometric'
transform_type = 'affine'
space = odl.uniform_discr([-1, -1], [1, 1], [101, 101], interp='linear')
coord_space = odl.uniform_discr([-1, -1], [1, 1], [101, 101], interp='linear').tangent_bundle
# Select deformation type of the target
if transform_type == 'affine':
transform = odl.deform.LinDeformFixedDisp(
space.tangent_bundle.element([lambda x: -0.1,
lambda x: 0.1 + x[1] * 0.1]))
elif transform_type == 'rotate':
theta = 0.2
transform = odl.deform.LinDeformFixedDisp(
space.tangent_bundle.element([lambda x: (np.cos(theta) - 1) * x[0] + np.sin(theta) * x[1],
lambda x: -np.sin(theta) * x[0] + (np.cos(theta) - 1) * x[1]]))
else:
assert False
# Make a parallel beam geometry with flat detector
angle_partition = odl.uniform_partition(np.pi / 3, 2 * np.pi * 2.0 / 3, 10)
detector_partition = odl.uniform_partition(-2, 2, 300)
geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)
ray_trafo = odl.tomo.RayTransform(space, geometry, impl='astra_cuda')
# Create template and target
template = odl.phantom.shepp_logan(space, modified=True)
target = transform(template)
target, template = template, target
data = ray_trafo(target)
# template, target = target, template
# Define data matching functional
data_matching = odl.solvers.L2Norm(ray_trafo.range).translated(data) * ray_trafo
lie_grp = lgd.Diff(space, coord_space=coord_space)
geometric_deform_action = lgd.GeometricDeformationAction(lie_grp, space)
scale_action = lgd.JacobianDeterminantScalingAction(lie_grp, space)
if action_type == 'mass_preserving':
deform_action = lgd.ComposedAction(geometric_deform_action, scale_action)
elif action_type == 'geometric':
deform_action = geometric_deform_action
else:
assert False
w = space.one()
weighting = coord_space[0].element(
lambda x: np.exp(-sum((xi/0.70)**10 for xi in x)))
grid = space.element(lambda x: np.cos(x[0] * np.pi * 5)**20 + np.cos(x[1] * np.pi * 5)**20)
# Create regularizing functional
# regularizer = 3 * odl.solvers.KullbackLeibler(space, prior=w)
regularizer = 0.5 * (odl.solvers.L2NormSquared(space) * weighting).translated(w)
# Create action
regularizer_action = lgd.JacobianDeterminantScalingAction(lie_grp, space)
# Initial guess
g = lie_grp.identity
# Combine action and functional into single object.
action = lgd.ProductSpaceAction(deform_action, regularizer_action, geometric_deform_action)
x = action.domain.element([template, w, grid]).copy()
f = odl.solvers.SeparableSum(data_matching, regularizer, odl.solvers.ZeroFunctional(space))
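# Note: the three components of x (template image, weighting w, coordinate grid)
# are paired one-to-one with the actions in `action` and with the terms of the
# separable functional `f`: data matching for the deformed template,
# regularization for w, and zero cost for the grid component.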
# Show some results, reuse the plot
template.show('template')
target.show('target')
# Create callback that displays the current iterate and prints the function
# value
callback = odl.solvers.CallbackShow('diffemorphic matching', step=20)
callback &= odl.solvers.CallbackPrint(f)
# Smoothing
filter_width = 0.5 # standard deviation of the Gaussian filter
ft = odl.trafos.FourierTransform(space)
c = filter_width ** 2 / 4.0 ** 2
gaussian = ft.range.element(lambda x: np.exp(-np.sqrt((x[0] ** 2 + x[1] ** 2) * c)))
convolution = ft.inverse * gaussian * ft
class AinvClass(odl.Operator):
def _call(self, x):
return [convolution(di) for di in x.data]
Ainv = AinvClass(domain=lie_grp.associated_algebra, range=lie_grp.associated_algebra, linear=True)
# Step length method
def steplen(itern):
return 3e-1 / (10 + itern)
line_search = odl.solvers.PredefinedLineSearch(steplen)
# Solve via gradient flow
result = lgd.gradient_flow_solver(x, f, g, action, Ainv=Ainv,
niter=2000, line_search=line_search,
callback=callback)
result.data.show('Resulting diffeo')
(result.data - lie_grp.identity.data).show('translations')
(result.data_inv - lie_grp.identity.data).show('translations inverse')
| adler-j/lie_grp_diffeo | examples/diffemorphism_2d_tomo.py | Python | gpl-3.0 | 3,986 | [
"Gaussian"
] | 5baf5021c962c8751613066480a109218e407a8116cbb36850150d06e1b49b2b |
import numpy as np
from scipy.linalg import eig
from mjhmc.samplers.algebraic_hmc import AlgebraicDiscrete, AlgebraicContinuous
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from exceptions import RuntimeError
def get_eigs(sampler, order, steps=1000, energies=None):
"""Runs the sampler, returns the l1 normalized eigs
"""
hmc = sampler(order, energies=energies)
for _ in xrange(steps):
hmc.sampling_iteration()
t = hmc.get_transition_matrix()
return eig(t, left=True, right=False)
def mixing_times(H, trials=10):
"""runs the two samplers with the given energy a bunch of times
reports back their average mixing times
"""
order = len(H) * 2
c_tm = np.zeros(trials)
d_tm = np.zeros(trials)
for i in xrange(trials):
# todo: add reset methods
print "trial: {}".format(i)
hmc = AlgebraicDiscrete(order, energies=H)
chmc = AlgebraicContinuous(order, energies=H)
d_tm[i] = hmc.calculate_mixing_time()
c_tm[i] = chmc.calculate_mixing_time()
print "Average mixing time for discrete sampler: {}".format(np.mean(d_tm))
print "Average mixing time for continuous sampler: {}".format(np.mean(c_tm))
def test_sampler(sampler, H, steps=1000):
"""Runs the sampler on the given energy
Prints a bunch of statistics about how well it's doing
returns t_obs, distr_obs
"""
order = len(H) * 2
smp = sampler(order, energies=H)
smp.sample(steps)
t_obs = smp.get_transition_matrix()
print "Predicted distribution: {} \n".format(smp.prd_distr)
print "Observed distribution: {} \n".format(smp.get_distr())
print "Sampling error (L1): {} \n".format(smp.sampling_err())
print "Observed transition matrix: \n {} \n".format(t_obs)
print "Eigenspectrum of observed transition matrix: \n"
eigs = rectify_evecs(eig(t_obs, left=True, right=False))
pprint_eigs(eigs)
return t_obs, smp.get_distr()
def pprint_eigs(eigs):
"""eigs: output of linalg.eig
pretty prints the results
"""
for l, vec in zip(eigs[0], eigs[1]):
print "Eigenvalue: {} \n".format(l)
print "Eigenvector: {} \n".format(list(vec))
def rectify_evecs(eigs):
"""
eigs: output of linalg.eig
normalizes evecs by L1 norm, truncates small complex components,
ensures things are positive
"""
evecs = eigs[1].T
l1_norm = np.abs(evecs).sum(axis=1)
norm_evecs = evecs / l1_norm[:, np.newaxis]
real_evals = [np.around(np.real_if_close(l), decimals=5) for l in eigs[0]]
real_evecs = []
for v in norm_evecs:
real_v = np.real_if_close(v)
if (real_v < 0).all():
real_v *= -1
real_evecs.append(real_v)
    # skip sorting for now: argsort is a pain because numpy will typecast to complex arr
# desc_idx = np.argsort(real_evals)[::-1]
# return real_evals[desc_idx], real_evecs[desc_idx]
return real_evals, real_evecs
def calc_spectral_gaps(order, trials=1, n_sample_step=1000):
"""Approximates the spectral gap for each sampler at a certain order
returns avg_discrete_sg, discrete_sg_var, avg_continuous_sg, continuous_sg_var
"""
assert order % 2 == 0
# normally distributed?
H = np.random.randn(order / 2)
c_sg = np.zeros(trials)
h_sg = np.zeros(trials)
print "Order: {}".format(order)
for i in xrange(trials):
hmc = AlgebraicDiscrete(order, energies=H)
chmc = AlgebraicContinuous(order, energies=H)
# runs until close to equilibrium distribution
n_hmc = hmc.calculate_mixing_time()
n_chmc = chmc.calculate_mixing_time()
h_sg[i] = sg(hmc)
c_sg[i] = sg(chmc)
print "{} samplings steps for hmc to approach equilibirium".format(n_hmc)
print "{} samplings steps for chmc to approach equilibirium".format(n_chmc)
return np.mean(h_sg), np.std(h_sg), np.mean(c_sg), np.std(c_sg)
def sg(sampler):
"""returns the spectral gap
t: transition matrix
"""
while True:
try:
t = sampler.get_empirical_transition_matrix()
w,v = eig(t)
w_ord = np.sort(w)[::-1]
if np.around(np.real_if_close(w_ord[0]), decimals=5) != 1:
raise Exception("no eval with value 1")
return 1 - np.absolute(w_ord[1])
except RuntimeError:
sampler.sample(1000)
def plot_sgs(max_ord=100):
"""Saves a plot of spectral gap against order
"""
plt.clf()
plt.ion()
orders = np.arange(2, max_ord) * 2
sgs = [calc_spectral_gaps(o) for o in orders]
avg_h_sg, std_h_sg, avg_c_sg, std_c_sg = zip(*sgs)
plt.errorbar(orders, avg_h_sg, yerr=std_h_sg, label='Discrete sampler')
plt.errorbar(orders, avg_c_sg, yerr=std_c_sg, label='Continuous sampler')
plt.title("Spectral gaps on random gaussian state ladders")
plt.legend()
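    # The docstring promises a saved figure, but the original body stops at the
    # legend; the call below is an assumed completion and the filename is a
    # placeholder, not taken from the original script.
    plt.savefig('spectral_gaps.png')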
| rueberger/MJHMC | mjhmc/misc/mixing.py | Python | gpl-2.0 | 4,915 | [
"Gaussian"
] | e2e80831cebd7e468b5d1a2d89fb12c4d6e90825fb918682cf93363311fd22b6 |
#! /usr/bin/env python3
from vasppy import procar
from vasppy.outcar import reciprocal_lattice_from_outcar
import argparse
def minimum_length( nmin ):
class MinimumLength( argparse.Action ):
def __call__( self, parser, args, values, option_string=None ):
if not nmin <= len( values ):
msg = 'argument "{f}" requires at least {nmin} arguments'.format( f = self.dest, nmin = nmin )
raise argparse.ArgumentError( self, msg )
setattr( args, self.dest, values )
return MinimumLength
def main():
parser = argparse.ArgumentParser( description='Calculate an effective mass from a VASP PROCAR using a fitted quadratic' )
parser.add_argument( '-k', '--k-points', help='index of k-points for calculating effective mass', nargs='+', type=int, required=True, action=minimum_length( 2 ) )
parser.add_argument( '-b', '--band-index', help='index of band for calculating effective mass', type=int, required=True )
parser.add_argument( '-f', '--procar', help='PROCAR filename (default PROCAR)', type=str, default='PROCAR' )
parser.add_argument( '-v', '--verbose', help='Verbose output', action='store_true' )
parser.add_argument( '-o', '--outcar', help='OUTCAR filename (default OUTCAR)', type=str, default='OUTCAR' )
parser.add_argument( '-s', '--spin', help='select spin channel (default 1 / non-spin-polarised)', type=int, default='1' )
args = parser.parse_args()
    reciprocal_lattice = reciprocal_lattice_from_outcar( args.outcar ) # Move reading the reciprocal lattice to procar.py
pcar = procar.Procar()
pcar.read_from_file( args.procar )
effective_mass = pcar.effective_mass_calc( k_point_indices = args.k_points,
band_index = args.band_index,
reciprocal_lattice = reciprocal_lattice,
spin = args.spin,
printing = args.verbose )
print( effective_mass )
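# Illustrative invocation (k-point indices, band index and file names are
# placeholders, not values from any particular calculation):
#   python effective_mass.py -k 10 11 12 -b 21 -f PROCAR -o OUTCAR -v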
if __name__ == '__main__':
main()
| bjmorgan/vasppy | vasppy/scripts/effective_mass.py | Python | mit | 2,107 | [
"VASP"
] | a2d1642484be2d92a7b98698a2cddcaac376e622ef1cabe14e905a54d405c95b |
"""
test views
"""
import datetime
import json
import re
import pytz
import ddt
import unittest
from mock import patch, MagicMock
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.field_overrides import OverrideFieldData # pylint: disable=import-error
from courseware.tests.factories import StudentModuleFactory # pylint: disable=import-error
from courseware.tests.helpers import LoginEnrollmentTestCase # pylint: disable=import-error
from courseware.tabs import get_course_tab_list
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.test import RequestFactory
from edxmako.shortcuts import render_to_response # pylint: disable=import-error
from student.roles import CourseCcxCoachRole # pylint: disable=import-error
from student.tests.factories import ( # pylint: disable=import-error
AdminFactory,
CourseEnrollmentFactory,
UserFactory,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import (
CourseFactory,
ItemFactory,
)
import xmodule.tabs as tabs
from ..models import (
CustomCourseForEdX,
CcxMembership,
CcxFutureMembership,
)
from ..overrides import get_override_for_ccx, override_field_for_ccx
from .. import ACTIVE_CCX_KEY
from .factories import (
CcxFactory,
CcxMembershipFactory,
CcxFutureMembershipFactory,
)
def intercept_renderer(path, context):
"""
Intercept calls to `render_to_response` and attach the context dict to the
response for examination in unit tests.
"""
# I think Django already does this for you in their TestClient, except
# we're bypassing that by using edxmako. Probably edxmako should be
# integrated better with Django's rendering and event system.
response = render_to_response(path, context)
response.mako_context = context
response.mako_template = path
return response
def ccx_dummy_request():
"""
Returns dummy request object for CCX coach tab test
"""
factory = RequestFactory()
request = factory.get('ccx_coach_dashboard')
request.user = MagicMock()
return request
@attr('shard_1')
class TestCoachDashboard(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests for Custom Courses views.
"""
def setUp(self):
"""
Set up tests
"""
super(TestCoachDashboard, self).setUp()
self.course = course = CourseFactory.create()
# Create instructor account
self.coach = coach = AdminFactory.create()
self.client.login(username=coach.username, password="test")
# Create a course outline
self.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
self.mooc_due = due = datetime.datetime(
2010, 7, 7, 0, 0, tzinfo=pytz.UTC)
chapters = [ItemFactory.create(start=start, parent=course)
for _ in xrange(2)]
sequentials = flatten([
[
ItemFactory.create(parent=chapter) for _ in xrange(2)
] for chapter in chapters
])
verticals = flatten([
[
ItemFactory.create(
due=due, parent=sequential, graded=True, format='Homework'
) for _ in xrange(2)
] for sequential in sequentials
])
blocks = flatten([ # pylint: disable=unused-variable
[
ItemFactory.create(parent=vertical) for _ in xrange(2)
] for vertical in verticals
])
def make_coach(self):
"""
create coach user
"""
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
def make_ccx(self):
"""
create ccx
"""
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
return ccx
def get_outbox(self):
"""
get fake outbox
"""
from django.core import mail
return mail.outbox
def test_not_a_coach(self):
"""
User is not a coach, should get Forbidden response.
"""
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_no_ccx_created(self):
"""
No CCX is created, coach should see form to add a CCX.
"""
self.make_coach()
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue(re.search(
'<form action=".+create_ccx"',
response.content))
def test_create_ccx(self):
"""
Create CCX. Follow redirect to coach dashboard, confirm we see
the coach dashboard for the new CCX.
"""
self.make_coach()
url = reverse(
'create_ccx',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'name': 'New CCX'})
self.assertEqual(response.status_code, 302)
url = response.get('location') # pylint: disable=no-member
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue(re.search('id="ccx-schedule"', response.content))
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('ccx.views.TODAY')
def test_edit_schedule(self, today):
"""
Get CCX schedule, modify it, save it.
"""
today.return_value = datetime.datetime(2014, 11, 25, tzinfo=pytz.UTC)
self.test_create_ccx()
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
schedule = json.loads(response.mako_context['schedule']) # pylint: disable=no-member
self.assertEqual(len(schedule), 2)
self.assertEqual(schedule[0]['hidden'], True)
self.assertEqual(schedule[0]['start'], None)
self.assertEqual(schedule[0]['children'][0]['start'], None)
self.assertEqual(schedule[0]['due'], None)
self.assertEqual(schedule[0]['children'][0]['due'], None)
self.assertEqual(
schedule[0]['children'][0]['children'][0]['due'], None
)
url = reverse(
'save_ccx',
kwargs={'course_id': self.course.id.to_deprecated_string()})
def unhide(unit):
"""
Recursively unhide a unit and all of its children in the CCX
schedule.
"""
unit['hidden'] = False
for child in unit.get('children', ()):
unhide(child)
unhide(schedule[0])
schedule[0]['start'] = u'2014-11-20 00:00'
schedule[0]['children'][0]['due'] = u'2014-12-25 00:00' # what a jerk!
response = self.client.post(
url, json.dumps(schedule), content_type='application/json'
)
schedule = json.loads(response.content)['schedule']
self.assertEqual(schedule[0]['hidden'], False)
self.assertEqual(schedule[0]['start'], u'2014-11-20 00:00')
self.assertEqual(
schedule[0]['children'][0]['due'], u'2014-12-25 00:00'
)
# Make sure start date set on course, follows start date of earliest
# scheduled chapter
ccx = CustomCourseForEdX.objects.get()
course_start = get_override_for_ccx(ccx, self.course, 'start')
self.assertEqual(str(course_start)[:-9], u'2014-11-20 00:00')
# Make sure grading policy adjusted
policy = get_override_for_ccx(ccx, self.course, 'grading_policy',
self.course.grading_policy)
self.assertEqual(policy['GRADER'][0]['type'], 'Homework')
self.assertEqual(policy['GRADER'][0]['min_count'], 4)
self.assertEqual(policy['GRADER'][1]['type'], 'Lab')
self.assertEqual(policy['GRADER'][1]['min_count'], 0)
self.assertEqual(policy['GRADER'][2]['type'], 'Midterm Exam')
self.assertEqual(policy['GRADER'][2]['min_count'], 0)
self.assertEqual(policy['GRADER'][3]['type'], 'Final Exam')
self.assertEqual(policy['GRADER'][3]['min_count'], 0)
def test_enroll_member_student(self):
"""enroll a list of students who are members of the class
"""
self.make_coach()
ccx = self.make_ccx()
enrollment = CourseEnrollmentFactory(course_id=self.course.id)
student = enrollment.user
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
'ccx_invite',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'enrollment-button': 'Enroll',
'student-ids': u','.join([student.email, ]), # pylint: disable=no-member
'email-students': 'Notify-students-by-email',
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(len(outbox), 1)
self.assertTrue(student.email in outbox[0].recipients()) # pylint: disable=no-member
# a CcxMembership exists for this student
self.assertTrue(
CcxMembership.objects.filter(ccx=ccx, student=student).exists()
)
def test_unenroll_member_student(self):
"""unenroll a list of students who are members of the class
"""
self.make_coach()
ccx = self.make_ccx()
enrollment = CourseEnrollmentFactory(course_id=self.course.id)
student = enrollment.user
outbox = self.get_outbox()
self.assertEqual(outbox, [])
# student is member of CCX:
CcxMembershipFactory(ccx=ccx, student=student)
url = reverse(
'ccx_invite',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'enrollment-button': 'Unenroll',
'student-ids': u','.join([student.email, ]), # pylint: disable=no-member
'email-students': 'Notify-students-by-email',
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(len(outbox), 1)
self.assertTrue(student.email in outbox[0].recipients()) # pylint: disable=no-member
# the membership for this student is gone
self.assertFalse(
CcxMembership.objects.filter(ccx=ccx, student=student).exists()
)
def test_enroll_non_user_student(self):
"""enroll a list of students who are not users yet
"""
test_email = "[email protected]"
self.make_coach()
ccx = self.make_ccx()
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
'ccx_invite',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'enrollment-button': 'Enroll',
'student-ids': u','.join([test_email, ]),
'email-students': 'Notify-students-by-email',
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(len(outbox), 1)
self.assertTrue(test_email in outbox[0].recipients())
self.assertTrue(
CcxFutureMembership.objects.filter(
ccx=ccx, email=test_email
).exists()
)
def test_unenroll_non_user_student(self):
"""unenroll a list of students who are not users yet
"""
test_email = "[email protected]"
self.make_coach()
ccx = self.make_ccx()
outbox = self.get_outbox()
CcxFutureMembershipFactory(ccx=ccx, email=test_email)
self.assertEqual(outbox, [])
url = reverse(
'ccx_invite',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'enrollment-button': 'Unenroll',
'student-ids': u','.join([test_email, ]),
'email-students': 'Notify-students-by-email',
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(len(outbox), 1)
self.assertTrue(test_email in outbox[0].recipients())
self.assertFalse(
CcxFutureMembership.objects.filter(
ccx=ccx, email=test_email
).exists()
)
def test_manage_add_single_student(self):
"""enroll a single student who is a member of the class already
"""
self.make_coach()
ccx = self.make_ccx()
enrollment = CourseEnrollmentFactory(course_id=self.course.id)
student = enrollment.user
# no emails have been sent so far
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
'ccx_manage_student',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'student-action': 'add',
'student-id': u','.join([student.email, ]), # pylint: disable=no-member
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(outbox, [])
# a CcxMembership exists for this student
self.assertTrue(
CcxMembership.objects.filter(ccx=ccx, student=student).exists()
)
def test_manage_remove_single_student(self):
"""unenroll a single student who is a member of the class already
"""
self.make_coach()
ccx = self.make_ccx()
enrollment = CourseEnrollmentFactory(course_id=self.course.id)
student = enrollment.user
CcxMembershipFactory(ccx=ccx, student=student)
# no emails have been sent so far
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
'ccx_manage_student',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'student-action': 'revoke',
'student-id': u','.join([student.email, ]), # pylint: disable=no-member
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(outbox, [])
# a CcxMembership exists for this student
self.assertFalse(
CcxMembership.objects.filter(ccx=ccx, student=student).exists()
)
GET_CHILDREN = XModuleMixin.get_children
def patched_get_children(self, usage_key_filter=None): # pylint: disable=missing-docstring
def iter_children(): # pylint: disable=missing-docstring
print self.__dict__
for child in GET_CHILDREN(self, usage_key_filter=usage_key_filter):
child._field_data_cache = {} # pylint: disable=protected-access
if not child.visible_to_staff_only:
yield child
return list(iter_children())
@attr('shard_1')
@override_settings(FIELD_OVERRIDE_PROVIDERS=(
'ccx.overrides.CustomCoursesForEdxOverrideProvider',))
@patch('xmodule.x_module.XModuleMixin.get_children', patched_get_children, spec=True)
class TestCCXGrades(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests for Custom Courses views.
"""
def setUp(self):
"""
Set up tests
"""
super(TestCCXGrades, self).setUp()
self.course = course = CourseFactory.create()
# Create instructor account
self.coach = coach = AdminFactory.create()
self.client.login(username=coach.username, password="test")
# Create a course outline
self.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
chapter = ItemFactory.create(
start=start, parent=course, category='sequential')
sections = [
ItemFactory.create(
parent=chapter,
category="sequential",
metadata={'graded': True, 'format': 'Homework'})
for _ in xrange(4)]
role = CourseCcxCoachRole(self.course.id)
role.add_users(coach)
self.ccx = ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
self.student = student = UserFactory.create()
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
CcxMembershipFactory(ccx=ccx, student=student, active=True)
for i, section in enumerate(sections):
for j in xrange(4):
item = ItemFactory.create(
parent=section,
category="problem",
data=StringResponseXMLFactory().build_xml(answer='foo'),
metadata={'rerandomize': 'always'}
)
StudentModuleFactory.create(
grade=1 if i < j else 0,
max_grade=1,
student=student,
course_id=self.course.id,
module_state_key=item.location
)
# Apparently the test harness doesn't use LmsFieldStorage, and I'm not
# sure if there's a way to poke the test harness to do so. So, we'll
# just inject the override field storage in this brute force manner.
OverrideFieldData.provider_classes = None
# pylint: disable=protected-access
for block in iter_blocks(course):
block._field_data = OverrideFieldData.wrap(coach, block._field_data)
new_cache = {'tabs': [], 'discussion_topics': []}
if 'grading_policy' in block._field_data_cache:
new_cache['grading_policy'] = block._field_data_cache['grading_policy']
block._field_data_cache = new_cache
def cleanup_provider_classes():
"""
After everything is done, clean up by un-doing the change to the
OverrideFieldData object that is done during the wrap method.
"""
OverrideFieldData.provider_classes = None
self.addCleanup(cleanup_provider_classes)
patch_context = patch('ccx.views.get_course_by_id')
get_course = patch_context.start()
get_course.return_value = course
self.addCleanup(patch_context.stop)
override_field_for_ccx(ccx, course, 'grading_policy', {
'GRADER': [
{'drop_count': 0,
'min_count': 2,
'short_label': 'HW',
'type': 'Homework',
'weight': 1}
],
'GRADE_CUTOFFS': {'Pass': 0.75},
})
override_field_for_ccx(
ccx, sections[-1], 'visible_to_staff_only', True)
@patch('ccx.views.render_to_response', intercept_renderer)
def test_gradebook(self):
url = reverse(
'ccx_gradebook',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
student_info = response.mako_context['students'][0] # pylint: disable=no-member
self.assertEqual(student_info['grade_summary']['percent'], 0.5)
self.assertEqual(
student_info['grade_summary']['grade_breakdown'][0]['percent'],
0.5)
self.assertEqual(
len(student_info['grade_summary']['section_breakdown']), 4)
def test_grades_csv(self):
url = reverse(
'ccx_grades_csv',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
headers, row = (
row.strip().split(',') for row in
response.content.strip().split('\n')
)
data = dict(zip(headers, row))
self.assertEqual(data['HW 01'], '0.75')
self.assertEqual(data['HW 02'], '0.5')
self.assertEqual(data['HW 03'], '0.25')
self.assertEqual(data['HW Avg'], '0.5')
self.assertTrue('HW 04' not in data)
@patch('courseware.views.render_to_response', intercept_renderer)
def test_student_progress(self):
patch_context = patch('courseware.views.get_course_with_access')
get_course = patch_context.start()
get_course.return_value = self.course
self.addCleanup(patch_context.stop)
self.client.login(username=self.student.username, password="test")
session = self.client.session
session[ACTIVE_CCX_KEY] = self.ccx.id # pylint: disable=no-member
session.save()
self.client.session.get(ACTIVE_CCX_KEY)
url = reverse(
'progress',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
grades = response.mako_context['grade_summary'] # pylint: disable=no-member
self.assertEqual(grades['percent'], 0.5)
self.assertEqual(grades['grade_breakdown'][0]['percent'], 0.5)
self.assertEqual(len(grades['section_breakdown']), 4)
@attr('shard_1')
class TestSwitchActiveCCX(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""Verify the view for switching which CCX is active, if any
"""
def setUp(self):
super(TestSwitchActiveCCX, self).setUp()
self.course = course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
enrollment = CourseEnrollmentFactory.create(course_id=course.id)
self.user = enrollment.user
self.target_url = reverse(
'course_root', args=[course.id.to_deprecated_string()]
)
def register_user_in_ccx(self, active=False):
"""create registration of self.user in self.ccx
registration will be inactive unless active=True
"""
CcxMembershipFactory(ccx=self.ccx, student=self.user, active=active)
def revoke_ccx_registration(self):
"""
delete membership
"""
membership = CcxMembership.objects.filter(
ccx=self.ccx, student=self.user
)
membership.delete()
def verify_active_ccx(self, request, id=None): # pylint: disable=redefined-builtin, invalid-name
"""verify that we have the correct active ccx"""
if id:
id = str(id)
self.assertEqual(id, request.session.get(ACTIVE_CCX_KEY, None))
def test_unauthorized_cannot_switch_to_ccx(self):
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
def test_unauthorized_cannot_switch_to_mooc(self):
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string()]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
def test_enrolled_inactive_user_cannot_select_ccx(self):
self.register_user_in_ccx(active=False)
self.client.login(username=self.user.username, password="test")
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
# if the ccx were active, we'd need to pass the ID of the ccx here.
self.verify_active_ccx(self.client)
def test_enrolled_user_can_select_ccx(self):
self.register_user_in_ccx(active=True)
self.client.login(username=self.user.username, password="test")
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
self.verify_active_ccx(self.client, self.ccx.id)
def test_enrolled_user_can_select_mooc(self):
self.register_user_in_ccx(active=True)
self.client.login(username=self.user.username, password="test")
# pre-seed the session with the ccx id
session = self.client.session
session[ACTIVE_CCX_KEY] = str(self.ccx.id)
session.save()
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string()]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
self.verify_active_ccx(self.client)
def test_unenrolled_user_cannot_select_ccx(self):
self.client.login(username=self.user.username, password="test")
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
# if the ccx were active, we'd need to pass the ID of the ccx here.
self.verify_active_ccx(self.client)
def test_unenrolled_user_switched_to_mooc(self):
self.client.login(username=self.user.username, password="test")
# pre-seed the session with the ccx id
session = self.client.session
session[ACTIVE_CCX_KEY] = str(self.ccx.id)
session.save()
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
# we tried to select the ccx but are not registered, so we are switched
# back to the mooc view
self.verify_active_ccx(self.client)
def test_unassociated_course_and_ccx_not_selected(self):
new_course = CourseFactory.create()
self.client.login(username=self.user.username, password="test")
expected_url = reverse(
'course_root', args=[new_course.id.to_deprecated_string()]
)
# the ccx and the course are not related.
switch_url = reverse(
'switch_active_ccx',
args=[new_course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(expected_url)) # pylint: disable=no-member
# the mooc should be active
self.verify_active_ccx(self.client)
def test_missing_ccx_cannot_be_selected(self):
self.register_user_in_ccx()
self.client.login(username=self.user.username, password="test")
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
# delete the ccx
self.ccx.delete() # pylint: disable=no-member
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
        # we tried to select the ccx but it doesn't exist anymore, so we are
# switched back to the mooc view
self.verify_active_ccx(self.client)
def test_revoking_ccx_membership_revokes_active_ccx(self):
self.register_user_in_ccx(active=True)
self.client.login(username=self.user.username, password="test")
# ensure ccx is active in the request session
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
self.client.get(switch_url)
self.verify_active_ccx(self.client, self.ccx.id)
# unenroll the user from the ccx
self.revoke_ccx_registration()
# request the course root and verify that the ccx is not active
self.client.get(self.target_url)
self.verify_active_ccx(self.client)
@ddt.ddt
class CCXCoachTabTestCase(ModuleStoreTestCase):
"""
Test case for CCX coach tab.
"""
def setUp(self):
super(CCXCoachTabTestCase, self).setUp()
self.course = CourseFactory.create()
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.user)
def check_ccx_tab(self):
"""Helper function for verifying the ccx tab."""
request = RequestFactory().request()
request.user = self.user
all_tabs = get_course_tab_list(request, self.course)
return any(tab.type == 'ccx_coach' for tab in all_tabs)
@ddt.data(
(True, True, True),
(True, False, False),
(False, True, False),
(False, False, False),
(True, None, False)
)
@ddt.unpack
def test_coach_tab_for_ccx_advance_settings(self, ccx_feature_flag, enable_ccx, expected_result):
"""
        Test ccx coach tab state (visible or hidden) depending on the values of the enable_ccx flag and the ccx feature flag.
"""
with self.settings(FEATURES={'CUSTOM_COURSES_EDX': ccx_feature_flag}):
self.course.enable_ccx = enable_ccx
self.assertEquals(
expected_result,
self.check_ccx_tab()
)
def flatten(seq):
"""
For [[1, 2], [3, 4]] returns [1, 2, 3, 4]. Does not recurse.
"""
return [x for sub in seq for x in sub]
def iter_blocks(course):
"""
Returns an iterator over all of the blocks in a course.
"""
def visit(block):
""" get child blocks """
yield block
for child in block.get_children():
for descendant in visit(child): # wish they'd backport yield from
yield descendant
return visit(course)
| shubhdev/openedx | lms/djangoapps/ccx/tests/test_views.py | Python | agpl-3.0 | 32,224 | [
"VisIt"
] | 2cd7083c518978d463912224dc01bd7663fcc7f74a7e097252e2b49639055647 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to configure .deb packages.
(c) 2014, Brian Coca <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections. Or just query
existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
default: null
aliases: ['pkg']
question:
description:
- A debconf configuration setting
required: false
default: null
aliases: ['setting', 'selection']
vtype:
description:
- The type of the value supplied
required: false
default: null
choices: [string, password, boolean, select, multiselect, note, error, title, text]
aliases: []
value:
description:
- Value to set the configuration to
required: false
default: null
aliases: ['answer']
unseen:
description:
- Do not set 'seen' flag when pre-seeding
required: false
default: False
aliases: []
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select'
# set to generate locales:
debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect'
# Accept oracle license
debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
# Specifying package you can register/return the list of questions and current values
debconf: name='tzdata'
'''
def get_selections(module, pkg):
cmd = [module.get_bin_path('debconf-show', True), pkg]
rc, out, err = module.run_command(' '.join(cmd))
if rc != 0:
module.fail_json(msg=err)
selections = {}
for line in out.splitlines():
(key, value) = line.split(':', 1)
selections[ key.strip('*').strip() ] = value.strip()
return selections
def set_selection(module, pkg, question, vtype, value, unseen):
setsel = module.get_bin_path('debconf-set-selections', True)
cmd = [setsel]
if unseen:
cmd.append('-u')
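    # debconf-set-selections reads one selection per line on stdin in the form
    #   <package> <question> <vtype> <value>
    # e.g. "locales locales/default_environment_locale select fr_FR.UTF-8"
    # (mirroring the EXAMPLES block above), which is what the join below builds.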
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, aliases=['pkg'], type='str'),
question = dict(required=False, aliases=['setting', 'selection'], type='str'),
vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text']),
value= dict(required=False, type='str'),
unseen = dict(required=False, type='bool'),
),
required_together = ( ['question','vtype', 'value'],),
supports_check_mode=True,
)
#TODO: enable passing array of options and/or debconf file from get-selections dump
pkg = module.params["name"]
question = module.params["question"]
vtype = module.params["vtype"]
value = module.params["value"]
unseen = module.params["unseen"]
prev = get_selections(module, pkg)
changed = False
msg = ""
if question is not None:
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
        if question not in prev or prev[question] != value:
changed = True
if changed:
if not module.check_mode:
rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
if rc:
module.fail_json(msg=e)
curr = { question: value }
if question in prev:
prev = {question: prev[question]}
else:
prev[question] = ''
module.exit_json(changed=changed, msg=msg, current=curr, previous=prev)
module.exit_json(changed=changed, msg=msg, current=prev)
# import module snippets
from ansible.module_utils.basic import *
main()
| ag-wood/ansible-modules-extras | system/debconf.py | Python | gpl-3.0 | 5,267 | [
"Brian"
] | e9f43d90f5ad3626969d05f2d3a0ec6a986c3d3630bc873a51813a7784c27c68 |
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import itk
import sys
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " inputFile outputFile [numberOfDimensions]")
sys.exit(1)
input_file = sys.argv[1]
output_file = sys.argv[2]
Dimension = 2
if len(sys.argv) > 3:
Dimension = int(sys.argv[3])
# Testing GPU Neighborhood Operator Image Filter
InputPixelType = itk.F
OutputPixelType = itk.F
InputImageType = itk.Image[InputPixelType, Dimension]
OutputImageType = itk.Image[OutputPixelType, Dimension]
InputGPUImageType = itk.GPUImage[InputPixelType, Dimension]
OutputGPUImageType = itk.GPUImage[OutputPixelType, Dimension]
input_image = itk.imread(input_file, InputPixelType)
input_gpu_image = itk.cast_image_filter(
input_image, in_place=False, ttype=(InputImageType, InputGPUImageType)
)
input_gpu_image.UpdateBuffers()
RealOutputPixelType = OutputPixelType
NeighborhoodFilterType = itk.NeighborhoodOperatorImageFilter[
InputImageType, OutputImageType, RealOutputPixelType
]
GPUNeighborhoodFilterType = itk.GPUNeighborhoodOperatorImageFilter[
InputGPUImageType, OutputGPUImageType, RealOutputPixelType
]
# Create 1D Gaussian operator
OperatorType = itk.GaussianOperator[RealOutputPixelType, Dimension]
oper = OperatorType()
oper.SetDirection(0)
oper.SetVariance(8.0)
oper.CreateDirectional()
# test 1~8 work units for CPU
for number_of_work_units in range(1, 9):
cpu_filter = NeighborhoodFilterType.New()
cpu_timer = itk.TimeProbe()
cpu_timer.Start()
cpu_filter.SetNumberOfWorkUnits(number_of_work_units)
cpu_filter.SetInput(input_image)
cpu_filter.SetOperator(oper)
cpu_filter.Update()
cpu_timer.Stop()
print(
"CPU NeighborhoodFilter took {0} seconds with {1} work units.\n".format(
cpu_timer.GetMean(), cpu_filter.GetNumberOfWorkUnits()
)
)
gpu_filter = GPUNeighborhoodFilterType.New()
gpu_timer = itk.TimeProbe()
gpu_timer.Start()
gpu_filter.SetInput(input_gpu_image)
gpu_filter.SetOperator(oper)
gpu_filter.Update()
gpu_filter.GetOutput().UpdateBuffers() # synchronization point (GPU->CPU memcpy)
gpu_timer.Stop()
print(f"GPU NeighborhoodFilter took {gpu_timer.GetMean()} seconds.\n")
output_image = itk.cast_image_filter(
gpu_filter.GetOutput(), ttype=(OutputGPUImageType, OutputImageType)
)
output_gpu_image = gpu_filter.GetOutput()
itk.imwrite(output_image, output_file)
| thewtex/ITK | Modules/Filtering/GPUImageFilterBase/wrapping/test/itkGPUNeighborhoodOperatorImageFilterTest.py | Python | apache-2.0 | 3,108 | [
"Gaussian"
] | e33dd4cb36fd98174285df041c82a8929e7ff91615f3b7b85108df60670881cd |
"""
The module contains classes and functions to generate different types of
simulated movements.
Author: Sivakumar Balasubramanian
Date: March 13, 2014
"""
import numpy as np
def data_span(data):
"""
Returns the ampltide span of the given list or array.
Parameters
----------
data : np.array, list, tuple
        The data whose amplitude span is to be calculated.
Returns
-------
span : float
        The span, defined as the difference between the max. and min.
values of the given data.
Notes
-----
Examples
--------
>>> data_span([-1,4,0,-23,27,0.573])
50
>>> data_span([])
Empty list or array provided! Span cannot be calculated.
"""
if len(data) == 0:
print("Empty list or array provided! Span cannot be calculated.")
return
return max(data) - min(data)
def mjt_discrete_movement(amp=1.0, dur=1.0, loc=0.,
time=np.arange(-0.5, 0.5, 0.01)):
"""
    Generate a discrete Minimum Jerk Trajectory (MJT) movement speed profile
of the given amplitude, duration and time location for the given time span.
Parameters
----------
amp : float
Amplitude of the MJT discrete movement.
dur : float
Duration of the MJT discrete movement.
loc : float
The temporal location of the center of the MJT speed
profile.
time : np.array
The time values for which the speed profile values are to be
returned.
Returns
-------
movement : np.array
The movement speed profile of the MJT discrete movement.
Notes
-----
Examples
--------
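    Illustrative call with the default parameters (added example, not part of
    the original module):
    >>> t = np.arange(-0.5, 0.5, 0.01)
    >>> v = mjt_discrete_movement(amp=1.0, dur=1.0, loc=0.0, time=t)
    >>> v.shape
    (100,)
    >>> round(float(v.max()), 3)
    1.875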
"""
t = np.array([np.min([np.max([(_t + 0.5 * dur - loc) / dur, 0.]), 1.])
for _t in time])
return amp * (30 * np.power(t, 4) -
60 * np.power(t, 3) +
30 * np.power(t, 2))
def gaussian_discrete_movement(amp=1.0, dur=1.0, loc=0.,
time=np.arange(-0.5, 0.5, 0.01)):
"""
Generate a discrete Gaussian movement speed profile of the given amplitude,
duration and time location for the given time span.
Parameters
----------
amp : float
Amplitude of the Gaussian discrete movement.
dur : float
Duration of the Gaussian discrete movement.
loc : float
The temporal location of the center of the Gaussian speed
profile.
time : np.array
The time values for which the speed profile values are to be
returned.
Returns
-------
movement : np.array
The movement speed profile of the Gaussian discrete movement.
Notes
-----
Examples
--------
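    Illustrative call with the default parameters (added example, not part of
    the original module):
    >>> t = np.arange(-0.5, 0.5, 0.01)
    >>> v = gaussian_discrete_movement(amp=1.0, dur=1.0, loc=0.0, time=t)
    >>> round(float(v.max()), 3)
    1.0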
"""
return amp * np.exp(-pow(5.0 * (time - loc) / dur, 2))
def gaussian_rhytmic_movement(amp, dur, interval, ts, n_movements):
"""
    Generates a rhythmic (repetitive) Gaussian movement speed profile that
is the sum of individual Gaussian speed profiles.
Parameters
----------
amp : float
        Amplitude of each submovement.
dur : float
Duration of each submovement.
interval : float
Time interval between the peaks two successive Gaussian
movements.
    ts : float
        Sampling interval used to generate the time vector.
    n_movements : int
        The number of Gaussian movements in the overall movement.
Returns
-------
time : np.array
The time of the movement starting from 0.
movement : np.array
The movement speed profile of the Gaussian rhythmic movement.
Notes
-----
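    Examples
    --------
    Illustrative call (parameter values are arbitrary and only added for
    clarity):
    >>> t, m = gaussian_rhytmic_movement(amp=1.0, dur=0.5, interval=1.0,
    ...                                  ts=0.01, n_movements=3)
    >>> t.shape == m.shape
    True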
"""
time = np.arange(0., (n_movements - 1) * interval + dur, ts)
movement = np.zeros(len(time))
for i in xrange(n_movements):
movement += gaussian_discrete_movement(amp, dur,
loc=i * interval + 0.5 * dur,
time=time)
return time, movement
def generate_random_movement(move_type='gaussian'):
"""
Generates a random movement as a sum of Gaussian submovements. The number
of submovements, their duration, time location and amplitude are chosen
randomly.
Parameters
----------
move_type : string
This must be a string indicating the type of discrete
movement to use for generating the random movement.
Returns
-------
t : np.array
The time of the movement starting from 0.
movement : np.array
The speed profile of the generated random movement.
submovements : np.array
A two dimensional array containing the individual
submovements in the generated random movement. The number
        of rows is equal to the number of submovements, and the
number of columns corresponds to the number of data
points for the duration of the generated movement.
Notes
-----
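    Examples
    --------
    Illustrative call (the generated movement is random by construction, so
    only the shapes are checked):
    >>> t, movement, submovements = generate_random_movement()
    >>> movement.shape == t.shape
    True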
"""
t_sample = 0.01
# get a random set of parameters for the movement.
Ns = np.random.randint(1, 10, 1)[0] # number of submovements
As = 0.8 * np.random.rand(Ns) + 0.2 # amplitude of submovements
Ts = 0.3 * np.random.rand(Ns) + 0.3 # duration of submovements
T0 = 0.5 * np.random.rand(Ns) # location of submovements
tmin = T0[0] - max(Ts) / 2
tmax = sum(T0) + max(Ts) / 2
t = np.arange(tmin, tmax, t_sample)
    movement = np.zeros(len(t))  # length matches the time vector so submovements can be summed
submovements = [0] * Ns
# movement function
move_func = (gaussian_discrete_movement if move_type.lower() == 'gaussian'
else mjt_discrete_movement)
for i in xrange(Ns):
submovements[i] = move_func(As[i], Ts[i], sum(T0[:i + 1]), t)
movement += submovements[i]
return t, movement, np.array(submovements)
def generate_movement(Ns, amp, dT, T, ts=0.001, move_type='gaussian'):
"""
Generates a movement as sum of submovements with the given parameters.
Parameters
----------
Ns : int
This indicates the number of submovements
amp : np.array
The amplitude of the Ns submovements.
dT : np.array
This is the inter-submovement interval. This is
of length Ns-1, as it only contains the intervals
                      with respect to the previous submovement. The
                      first submovement is assumed to start from zero,
                      i.e. its center is located at half its duration.
T : np.array
The durations of the Ns submovements.
ts : float
This is the sampling duration.
move_type : string
This must be a string indicating the type of
submovement to use - 'gaussian' or 'mjt'.
Returns
-------
t : np.array
The time of the movement starting from 0.
movement : np.array
The speed profile of the generated random movement.
submovements : np.array
A two dimensional array containing the individual
submovements in the generated random movement. The number
        of rows is equal to the number of submovements, and the
number of columns corresponds to the number of data
points for the duration of the generated movement.
Notes
-----
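    Examples
    --------
    Illustrative call (parameter values are arbitrary and only added for
    clarity):
    >>> t, mov, sub = generate_movement(Ns=2, amp=[1.0, 0.5], dT=[0.3],
    ...                                 T=[0.5, 0.5], ts=0.01)
    >>> sub.shape[0]
    2
    >>> mov.shape == t.shape
    True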
"""
# get movement end time.
tmax = get_movement_endtime(dT, T)
t = np.arange(0., tmax, ts)
# initialize movement and submovement variables.
movement = np.zeros(len(t))
submovements = [0] * Ns
# get movement function
move_func = (gaussian_discrete_movement if move_type.lower() == 'gaussian'
else mjt_discrete_movement)
for i in xrange(Ns):
submovements[i] = move_func(amp[i], T[i], sum(dT[:i]) + 0.5 * T[i], t)
movement += submovements[i]
return t, movement, np.array(submovements)
def get_movement_endtime(dT, T):
"""
Returns the end time of the movement assuming that the start time is zero.
"""
_t = 0.
for i, _dT in enumerate(T):
_t = np.max([_t, np.sum(dT[:i]) + T[i]])
return _t
| siva82kb/SPARC | scripts/movements.py | Python | isc | 8,753 | [
"Gaussian"
] | fdb430e8303789e374351a94dae9a5c4e8eb6bafdee1824ff8dedbe7df9b2391 |
"""
===========================
PPV Grouping and Clustering
===========================
Functions and routines to perform clustering analysis on the BGPS HCO+/N2H+
molecular line survey.
"""
from __future__ import division
import numpy as np
import pandas as pd
import cPickle as pickle
from collections import deque
from multiprocessing import Pool
from scipy.special import erf
from .catalog import read_bgps_vel, read_cat, read_dpdf
from .coord import sep
class ClusterDBSCAN(object):
"""
    Implementation of the DBSCAN cluster recognition algorithm.
Parameters
----------
df : pd.DataFrame
cols : list, default ['glon_peak', 'glat_peak', 'all_vlsr']
List of column names for Galactic longitude, latitude, and velocity in
decimal degrees and km/s.
flag_col : str, 'vlsr_f'
Flag column to select good velocities from.
lims : list, default [0.1, 3.5]
Coordinates search radius for [angle, velocity] where angle is written
in decimal degrees.
min_points : number, default 1
        Minimum number of points in a node's neighborhood in order to associate
it with a cluster.
Returns
-------
tree : dict
Attributes
----------
kdar_conflict_nodes : number
kdar_conflict_clusters : number
kdar_agree_nodes : number
kdar_agree_clusters : number
kdar_span_nodes : number
new_kdar_assoc : number
conflict_frac : number
cluster_ids : list
n_clusters : number
n_core_nodes : number
self.cluster_nodes : dict
Usage
-----
>>> c = ClusterDBSCAN()
>>> c.dbscan()
>>> c.analysis()
"""
ver = 'v210'
good_kdars = ['N', 'F', 'T', 'O']
def __init__(self,
cols=['glon_peak', 'glat_peak', 'all_vlsr'],
flag_col='vlsr_f',
lims=[0.065, 4.0],
min_points=1,
**kwargs):
# Set values
self.cols = cols
self.flag_col = flag_col
self.lims = lims
        self.min_points = min_points
# Initialize tree and BGPS
self.cluster_id = 0
self.read_data()
self.velo_to_ang = lims[0] / lims[1]
self.__scanned = False
def dbscan(self):
"""
Group clusters of points in PPV-space using the `DBSCAN` algorithm.
"""
df = self.df
for ii in df.index:
if self.tree[ii][0]:
continue
# Mark as visited
self.tree[ii][0] = True
neighbors = self.region_query(ii)
self.tree[ii][2].extend(neighbors)
if len(neighbors) <= self.min_points:
self.tree[ii][1] = -1
else:
self.cluster_id += 1
self.expand_cluster(ii, neighbors)
self.__scanned = True
def expand_cluster(self, ix, neighbors):
"""
Recursively search the nodes of `tree` if they are flagged as
        unvisited and add them to the current `cluster_id`. The `self.tree` is
modified in place.
Parameters
----------
ix : number
Node dataframe index number
neighbors : array-like
Node neighbor indices
"""
self.tree[ix][1] = self.cluster_id
neighbors = set(neighbors)
visited_neighbors = set()
# Recursively search neighbors
while neighbors:
ii = neighbors.pop()
visited = self.tree[ii][0]
node_id = self.tree[ii][1]
visited_neighbors.add(ii)
if not visited:
# Mark as visited
self.tree[ii][0] = True
branch = set(self.region_query(ii))
self.tree[ii][2].extend(branch)
# Union branch to current set of neighbors if not visited
if len(branch) > self.min_points:
neighbors.update(branch.difference(visited_neighbors))
# If not yet a member, assign to cluster
if node_id == 0:
self.tree[ii][1] = self.cluster_id
def region_query(self, ix):
"""
Search the node's coordinate neighborhood of index `ix` and return
indices of neighbors.
Parameters
----------
ix : number
Node dataframe index number
Returns
-------
neighbors : np.array
Array of neighbor node indices
"""
df = self.df
lim_a, lim_v = self.lims
cols = self.cols
l, b, v = df.ix[ix, cols].values
# Select box
df = df[(df[cols[0]] > l - lim_a) &
(df[cols[0]] < l + lim_a) &
(df[cols[1]] > b - lim_a) &
(df[cols[1]] < b + lim_a) &
(df[cols[2]] > v - lim_v) &
(df[cols[2]] < v + lim_v)]
# Coordinate query
# FIXME doesn't do correct latitude transformation
neighbors = df[(df[cols[0]] - l)**2 + (df[cols[1]] - b)**2 +
self.velo_to_ang**2 * (df[cols[2]] - v)**2 <
lim_a**2].index.values
return neighbors
def read_data(self):
"""
Read and process data for DPDFs, velocity catalog, and select sources
with well-resolved KDAs.
"""
df = read_cat('bgps_' + self.ver + '_vel').set_index(self.ver + 'cnum')
df = df[df[self.cols[2]].notnull()]
dpdf = read_cat('bgps_' + self.ver + '_kdars')
cnums = dpdf[self.ver + 'cnum']
kdars = dpdf['kdar']
# Check well resolved KDAs
dpdf_mask = np.in1d(kdars, self.good_kdars)
# Assign as instance variables
self.good_cnums = zip(cnums[dpdf_mask], kdars[dpdf_mask])
self.dpdf = dpdf
self.df = df
self.tree = {ix : [False, 0, []] for ix in df.index}
def analysis(self, verbose=False):
"""
Analyze the built tree. Requires `dbscan` to have been already run.
Parameters
----------
verbose : bool
Print results to terminal.
"""
if not self.__scanned:
raise Exception('Tree has not been built, run `dbscan`.')
tree = self.tree
df = self.df
# Number of clusters
cluster_ids = np.unique([row[1] for row in tree.values()])
n_clusters = cluster_ids.shape[0]
# Cluster nodes
cluster_nodes = {ix : [[], [], 0] for ix in cluster_ids}
for cid in cluster_ids:
nodes = []
for ix in df.index:
if tree[ix][1] == cid:
nodes.extend(tree[ix][2])
cluster_nodes[cid][0].extend(np.unique(nodes))
# Nodes in clusters
core_nodes = cluster_nodes.copy()
del core_nodes[-1]
n_core_nodes = np.ravel(core_nodes.values()).shape[0]
# KDAR nodes in cluster
good_cnums = self.good_cnums
self.kdar_skipped = 0
for ii, kdar in good_cnums:
# Should only continue if more stringent velocity flags
if ii not in df.index:
self.kdar_skipped += 1
continue
cid = self.tree[ii][1]
cluster_nodes[cid][1].append(kdar)
# Check unique KDARs
for cid in cluster_ids:
if cid == -1:
continue
kdar_assoc = cluster_nodes[cid][1]
kdar_unique = np.unique(kdar_assoc)
kdar_disagree = ('N' in kdar_unique) & ('F' in kdar_unique)
outer = 'O' in kdar_unique
group_f = 0
if outer:
group_f = 3
elif len(kdar_unique) == 1:
group_f = 1
elif (len(kdar_unique) > 1) & (not kdar_disagree):
group_f = 1
elif (len(kdar_unique) > 1) & kdar_disagree:
group_f = 2
cluster_nodes[cid][2] = group_f
# Number of nodes in clusters with KDAR conflicts
self.kdar_conflict_nodes = sum([len(v[0]) for k, v in
cluster_nodes.iteritems() if (v[2] == 2) & (k != -1)])
self.kdar_conflict_clusters = len([len(v[0]) for k, v in
cluster_nodes.iteritems() if (v[2] == 2) & (k != -1)])
# Number of nodes in clusters with agreeing KDARs
self.kdar_agree_nodes = sum([len(v[0]) for k, v in
cluster_nodes.iteritems() if (v[2] == 1) & (len(v[1]))])
self.kdar_agree_clusters = len([len(v[0]) for k, v in
cluster_nodes.iteritems() if (v[2] == 1) & (len(v[1]))])
# Number of nodes in cluster with KDARs
self.kdar_span_nodes = sum([len(v[0]) for k, v in
cluster_nodes.iteritems() if (v[2] in [1,2,3]) & (k != -1)])
self.new_kdar_assoc = sum([len(v[0]) - len(v[1]) for k, v in
cluster_nodes.iteritems() if (v[2] in [1, 2]) & (k != -1)])
self.conflict_frac = self.kdar_conflict_nodes / \
(self.kdar_agree_nodes + self.kdar_conflict_nodes)
# Assign and save results
self.cluster_ids = cluster_ids
self.n_clusters = n_clusters
self.n_core_nodes = n_core_nodes
self.cluster_nodes = cluster_nodes
if verbose:
print """
-- Results:
{0:<10} : Clusters
{1:<10} : Cluster (core) nodes
{2:<10} : Nodes in clusters containing KDAR clumps
{3:<10} : Net new KDAR associations for nodes in clusters
{4:<10} : Nodes in clusters containing KDAR conflicts
{5:<10} : Nodes in clusters containing KDAR agreements
{6:<10f} : Ratio of conflict nodes to all multi-KDAR nodes
""".format(n_clusters, n_core_nodes, self.kdar_span_nodes,
self.new_kdar_assoc, self.kdar_conflict_nodes,
self.kdar_agree_nodes, self.conflict_frac)
def to_df(self):
"""
Return the cluster-ID and group flag for a BGPS catalog number in
DataFrame format.
"""
if not self.__scanned:
raise Exception('Tree has not been built, run `dbscan`.')
cols = [self.ver + 'cnum', 'cid', 'group_size', 'group_f',
'group_kdars']
table_data = []
for ix in self.tree.iterkeys():
cid = self.tree[ix][1]
group_f = self.cluster_nodes[cid][2]
if cid == -1:
group_size = 0
group_kdars = 0
else:
group_size = len(self.cluster_nodes[cid][0])
group_kdars = len(self.cluster_nodes[cid][1])
table_data.append([ix, cid, group_size, group_f, group_kdars])
self.cluster_df = pd.DataFrame(table_data, columns=cols).sort(cols[0])
return self.cluster_df
class PpvBroadcaster(object):
"""
Calculate posterior DPDF for clumps associated by PPV groups. Applies a
    distance-weighted average EMAF prior to a clump's own priors within a
    PPV-group. The final posterior is downweighted by the conflict fraction.
Parameters
----------
cluster : ClusterDBSCAN
PPV-grouping algorithm `ClusterDBSCAN` instance
Attributes
----------
posteriors : dict
Resultant DPDFs for a catalog number
Usage
-----
>>> pb = PpvBroadcaster(c)
>>> pb.process_posteriors()
"""
rolloff_dist = 100.
posteriors = {}
weighted_emafs = {}
def __init__(self, cluster):
# Cluster parameters
self.cluster = cluster
self.groups = cluster.cluster_nodes
self.angle_lim = cluster.lims[0]
self.velo_lim = cluster.lims[1]
self.conflict_frac = cluster.conflict_frac
# Data files
self.evo = read_cat('bgps_v210_evo').set_index('v210cnum')
self.velos = read_cat('bgps_v210_vel').set_index('v210cnum')
self.dpdf_props = read_cat('bgps_v210_dpdf_props').set_index(
'v210cnum')
self.omni = read_dpdf(v=2)
# start, start * nbins, nbins
self.xdist = np.linspace(self.omni[2].data[0][2],
self.omni[2].data[0][1] * self.omni[2].data[0][0],
self.omni[2].data[0][0])
self.dpdf_shape = self.xdist.shape
def _get_omni_index(self, node):
"""
Calculate the index in the DPDF array for a given catalog number.
Parameters
----------
node : number
Catalog number
Returns
-------
omni_index : number
Index of the distance-omnibus fits table
"""
return np.argwhere(self.dpdf_props.index == node)[0][0]
def _get_emaf_prior(self, node):
"""
Get the EMAF prior from the distance-omnibus fits table for a given
catalog number.
Parameters
----------
node : number
Catalog number
Returns
-------
emaf : array
EMAF prior distribution array
"""
emaf_col = 6
omni_index = self._get_omni_index(node)
return self.omni[emaf_col].data[omni_index]
def _assign_default_posterior(self, node):
"""
        For a node that already has a well-constrained DPDF, assign its own
posterior to the posteriors dictionary.
Parameters
----------
node : number
Catalog number
omni_index : number
Index of the distance-omnibus fits table
"""
post_col = 10
omni_index = self._get_omni_index(node)
posterior = self.omni[post_col].data[omni_index]
self.posteriors[node] = posterior
def _combine_weighted_emaf(self, weights):
"""
Combine the EMAF priors for nodes in a cluster that have
well-constrained DPDFs. The EMAFs are weighted by distance from the
home-node, given a dictionary of nodes-to-weights.
Parameters
----------
weights : dict
Weights for their associated catalog number
"""
weight_sum = np.sum([w for w in weights.values()])
weighted_emaf = np.zeros(self.dpdf_shape)
for node, weight in weights.items():
weighted_emaf += weight * self._get_emaf_prior(node)
return weighted_emaf / weight_sum
def _apply_weighted_emaf(self, node, weighted_emaf):
"""
Apply the distance-weighted EMAF prior to the nodes own priors and
also apply the group-conflict prior. Returns normalized output.
Parameters
----------
node : number
Catalog number for current node
weighted_emaf : numpy.array
Distance-weighted EMAF
Returns
-------
posterior : numpy.array
Normalized posterior probability distribution
"""
omni_index = self._get_omni_index(node)
posterior = np.ones(self.dpdf_shape)
# Combine priors of node
prior_cols = range(3, 9)
for ii in prior_cols:
posterior *= self.omni[ii].data[omni_index]
# Apply group distance-weighted EMAF prior
posterior *= weighted_emaf
# Apply group conflict fraction prior
tan_dist = self.dpdf_props.ix[node, 'dpdf_dtan']
if self.xdist[posterior.argmax()] < tan_dist:
kdar_flag = 'N'
else:
kdar_flag = 'F'
posterior *= self._make_conflict_prior(tan_dist, kdar_flag)
# Re-normalize
posterior /= np.sum(posterior)
return posterior
def _distance_weight(self, angle_sep, velo_sep):
"""
        Calculate distance-based weights from a node to all other nodes in
        the group. Uses a Gaussian weighting whose width is set by the
        angular search radius scaled by `lim_multiple`.
        Parameters
        ----------
        angle_sep : number
            Angle separation between nodes in degrees
        velo_sep : number
            Velocity separation between nodes in km/s
Returns
-------
weight : number
"""
lim_multiple = 1.
return np.exp(-(angle_sep**2 + (self.angle_lim / self.velo_lim)**2
* velo_sep**2) / (2 * (self.angle_lim /
lim_multiple)**2))
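    # In symbols (a restatement of the expression above, not a change to the
    # algorithm): with a = self.angle_lim, v = self.velo_lim and
    # m = lim_multiple,
    #
    #     w = exp(-(dtheta**2 + (a / v)**2 * dv**2) / (2 * (a / m)**2))
    #
    # i.e. the velocity separation is rescaled onto the angular axis before
    # the Gaussian falloff is applied.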
def _make_conflict_prior(self, tan_dist, kdar_flag):
"""
Create an array for the group-conflict fraction. Based on an Error
function centered on the tangent distance that downweights the given
        KDAR flag position.
Parameters
----------
tan_dist : number
Tangent distance in pc
kdar_flag : str
            KDA resolution side to downweight for conflicting group associations. Valid
inputs include "N" and "F".
Returns
-------
conflict_prior : numpy.array
"""
if kdar_flag not in ['N', 'F']:
raise ValueError('Invalid KDAR flag {0}.'.format(kdar_flag))
peak_select = {'N': 1.0, 'F': -1.0}
return peak_select[kdar_flag] * self.conflict_frac \
* erf((self.xdist - tan_dist) / self.rolloff_dist) + 1.0
def _process_isolated(self, isolated_nodes):
"""
Assign default posteriors to nodes that are isolated but have
well-constrained DPDFs.
"""
for node in isolated_nodes:
omni_index = self._get_omni_index(node)
node_kdar = self.dpdf_props.ix[node, 'dpdf_KDAR']
if node_kdar in ['N', 'T', 'F', 'O']:
self._assign_default_posterior(node)
def process_posteriors(self):
"""
Calculate and apply the weighted posteriors, then add them to the
posteriors dictionary.
"""
for cid, items in self.groups.iteritems():
group_nodes, group_kdars, kdar_flag = items
# Cluster-ID of -1 for no groups
if cid == -1:
self._process_isolated(group_nodes)
else:
for node in group_nodes:
self.calc_weighted_posterior(node, group_nodes)
# Merge the single/isolated clumps that aren't found in the cluster
# groups.
singles = [ii for ii,jj in self.cluster.good_cnums if ii not in
self.posteriors.keys()]
for node in singles:
self._assign_default_posterior(node)
def calc_weighted_posterior(self, home_node, group_nodes):
"""
For a given node ("Home Node") and the other members of it's group,
calculate the distance-weighted average of the EMAF priors and apply
them to the node. Downweight by a step-function like prior to account
for the group misidentification rate.
Parameters
----------
home_node : number
Catalog number for current node
group_nodes : list-like
List of catalog numbers for members of the group
"""
assert home_node in self.dpdf_props.index
# Remove current node from cluster nodes
group_nodes = [n for n in group_nodes if n != home_node]
# Index in distance omnibus fits object for node
omni_index = self._get_omni_index(home_node)
# Get home node properties
home_node_kdar = self.dpdf_props.ix[home_node, 'dpdf_KDAR']
home_node_glon = self.dpdf_props.ix[home_node, 'dpdf_glon']
home_node_glat = self.dpdf_props.ix[home_node, 'dpdf_glat']
home_node_velo = self.velos.ix[home_node, 'all_vlsr']
# Select nodes with KDARs
kdar_nodes = self.dpdf_props.ix[group_nodes, 'dpdf_KDAR']
kdar_nodes = kdar_nodes[kdar_nodes.isin(['N', 'F'])].index
# If node already has KDAR, then return it's DPDF
if home_node_kdar in ['N', 'F', 'O']:
self._assign_default_posterior(home_node)
elif (home_node_kdar == 'T') & (len(kdar_nodes) == 0):
self._assign_default_posterior(home_node)
elif len(kdar_nodes) == 0:
pass
else:
# For neighbor nodes with KDARs, calculate weights
weights = {}
for node in kdar_nodes:
# Current node coordinates
glon = self.dpdf_props.ix[node, 'dpdf_glon']
glat = self.dpdf_props.ix[node, 'dpdf_glat']
velo = self.velos.ix[node, 'all_vlsr']
# Calc seperation between kdar node and home node
coord_sep = sep(home_node_glat, home_node_glon, glat, glon)
velo_sep = np.abs(home_node_velo - velo)
# Calculate weights
weights[node] = self._distance_weight(coord_sep, velo_sep)
# Average EMAF priors by weights
weighted_emaf = self._combine_weighted_emaf(weights)
self.weighted_emafs[home_node] = weighted_emaf
# Average node specific priors, weighted EMAF, and group conflict
# prior
posterior = self._apply_weighted_emaf(home_node, weighted_emaf)
self.posteriors[home_node] = posterior
def save(self, outname='ppv_dpdf_posteriors'):
"""
Save and pickle the posteriors dictionary to a file `outname`
Parameters
----------
outname : str
Name of output pickle file. File ends in '.pickle' extension.
"""
pickle.dump(self.posteriors, open(outname + '.pickle', 'wb'))
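# Example end-to-end pipeline (illustrative sketch; it simply chains the
# public methods documented in the class docstrings above):
#
# >>> c = ClusterDBSCAN()
# >>> c.dbscan()
# >>> c.analysis()
# >>> pb = PpvBroadcaster(c)
# >>> pb.process_posteriors()
# >>> pb.save('ppv_dpdf_posteriors')  # writes 'ppv_dpdf_posteriors.pickle'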
class ClusterRegion(object):
"""
Create DS9 region files from a `ClusterDBSCAN` instance.
Parameters
----------
obj : besl.ppv_group.ClusterDBSCAN
"""
ver = 'v210'
def __init__(self, obj, **kwargs):
self.obj = obj
self.angle_rad, self.velo_rad = obj.lims
self.preamble = 'global color=green font="helvetica 10 normal" ' + \
'select=1 highlite=1 edit=1 move=1 delete=1 ' + \
'include=1 fixed=0\ngalactic\n'
self.color_ring = deque(['green', 'red', 'blue', 'yellow', 'magenta',
'cyan', 'orange'])
self.point_entry = 'x point {l} {b} # text={lcb}{v}:{cid}:{ix}{rcb} color={c}\n'
self.circle_entry = 'circle {l} {b} ' + str(self.angle_rad) + ' # color={c}\n'
self.braces = {'lcb': '{', 'rcb': '}'}
def _write_region(self, out_filen, text):
with open(out_filen + '.reg', 'w') as f:
f.write(text)
def write_all(self):
"""
Do all write methods with default parameters.
"""
self.clusters()
self.clusters(out_filen='ppv_group_nocirc', search_circles=False)
self.agree_conflict()
self.single_kdar()
self.no_kdar()
def clusters(self, out_filen='ppv_group', search_circles=True):
"""
Write a DS9 regions file with cluster nodes colored by cluster and circles
        showing the angular search radius; each node's velocity is included in
        its text label.
Parameters
----------
out_filen : str
Name of regions files, ends in '.reg' extension.
        search_circles : bool, default True
Include the circular apertures to extent of angular search radius.
Attributes
----------
cluster_text : str
String written to file
"""
obj = self.obj
color_ring = self.color_ring
all_lines = self.preamble
for cid, params in obj.cluster_nodes.iteritems():
nodes = params[0]
kdars = params[1]
conflict_flag = params[2]
if cid == -1:
c = 'black'
else:
c = color_ring[0]
for ii in nodes:
l = obj.df.ix[ii, 'glon_peak']
b = obj.df.ix[ii, 'glat_peak']
v = obj.df.ix[ii, 'all_vlsr']
cnum = obj.df.ix[ii, self.ver + 'cnum']
all_lines += self.point_entry.format(l=l, b=b, v=v, cid=cid,
ix=cnum, c=c, **self.braces)
if search_circles:
all_lines += self.circle_entry.format(l=l, b=b, c=c)
color_ring.rotate()
self._write_region(out_filen, text=all_lines)
self.cluster_text = all_lines
def agree_conflict(self, out_filen='agree_conflict_group'):
"""
Write a DS9 regions file with cluster nodes colored by cluster and circles
showing the angular search radius, and the nodes velocity visualized.
Parameters
----------
out_filen : str
Name of regions files, ends in '.reg' extension.
Attributes
----------
agree_conflict_text : str
String written to file
"""
obj = self.obj
all_lines = self.preamble
all_colors = {1: 'green', 2: 'red'}
for cid, params in obj.cluster_nodes.iteritems():
nodes = params[0]
kdars = params[1]
conflict_flag = params[2]
if (len(kdars) < 2) | (cid == -1):
continue
for ii in nodes:
l = obj.df.ix[ii, 'glon_peak']
b = obj.df.ix[ii, 'glat_peak']
c = all_colors[conflict_flag]
all_lines += self.circle_entry.format(l=l, b=b, c=c)
self._write_region(out_filen, text=all_lines)
self.agree_conflict_text = all_lines
def single_kdar(self, out_filen='single_kdar_group'):
"""
Write a DS9 regions file for clusters that contain only a single KDAR.
Parameters
----------
out_filen : str
Name of regions files, ends in '.reg' extension.
Attributes
----------
single_kdar_text : str
String written to file
"""
obj = self.obj
all_lines = self.preamble
for cid, params in obj.cluster_nodes.iteritems():
nodes = params[0]
kdars = params[1]
if len(kdars) == 1:
for ii in nodes:
l = obj.df.ix[ii, 'glon_peak']
b = obj.df.ix[ii, 'glat_peak']
all_lines += self.circle_entry.format(l=l, b=b, c='grey')
self._write_region(out_filen, text=all_lines)
self.single_kdar_text = all_lines
def no_kdar(self, out_filen='no_kdar_group'):
"""
Write a DS9 regions file for clusters that contain no KDAR.
Parameters
----------
out_filen : str
Name of regions file, ends in '.reg' extension.
Attributes
----------
no_kdar_text : str
String written to file
"""
obj = self.obj
all_lines = self.preamble
for cid, params in obj.cluster_nodes.iteritems():
nodes = params[0]
kdars = params[1]
if len(kdars) == 0:
for ii in nodes:
l = obj.df.ix[ii, 'glon_peak']
b = obj.df.ix[ii, 'glat_peak']
all_lines += self.circle_entry.format(l=l, b=b, c='black')
self._write_region(out_filen, text=all_lines)
self.no_kdar_text = all_lines
def grid_calc(lims=[0.05, 0.2, 1, 4], points=[10, 10], out_filen='obj_grid'):
"""
Run DBSCAN for a grid of angle and velocity search distances. Uses
    multiprocessing by default.
Parameters
----------
lims : list
List of limits for search [angle_min, angle_max, velocity_min,
velocity_max].
points : list
Number of grid points to sample, end-inclusive.
out_filen : str
Filename for pickled object grid, ends in '.pickle' extension.
Returns
-------
obj_grid : np.array
Object array of `ClusterDBSCAN` instances
X, Y : np.array
Result of np.meshgrid over arrays of sample points
"""
assert (len(lims) == 4) & (len(points) == 2)
assert (points[0] >= 2) & (points[1] >= 2)
x = np.linspace(lims[0], lims[1], points[0])
y = np.linspace(lims[2], lims[3], points[1])
X, Y = np.meshgrid(x, y)
limits = np.dstack([X, Y]).reshape(-1, 2)
clusters = (ClusterDBSCAN(lims=l) for l in limits)
# Compute clusters with multiprocessing
pool = Pool(processes=6)
obj_grid = pool.imap(wrapper, clusters)
pool.close()
pool.join()
# Reshape to grid
obj_grid = np.reshape([obj for obj in obj_grid], X.shape)
with open(out_filen + '.pickle', 'wb') as f:
pickle.dump([obj_grid, X, Y], f)
return obj_grid, X, Y
def wrapper(c):
"""
Wrapper function on the top-level domain so that the object is pickleable
for `multiprocessing.Pool`.
"""
c.dbscan()
c.analysis(verbose=True)
return c
def reduce_obj_grid(filen='obj_grid', out_filen='obj_props'):
"""
    Extract parameters from object grid into a dictionary of matrices for easy
plotting and analysis.
Parameters
----------
filen : str, default 'obj_grid'
Filename of the `obj_grid` pickle ending in the '.pickle' extension.
out_filen : str, default 'obj_props'
Filename of the reduced object parameter dictionary, ends in the
'.pickle' extension.
Returns
-------
obj_dict : dict
Dictionary of the analysis values over each tree in `obj_grid`.
"""
obj_grid, X, Y = pickle.load(open(filen + '.pickle', 'rb'))
obj_dict = {}
obj_dict['angle'] = X
obj_dict['velo'] = Y
props = [('n_clusters', lambda c: c.n_clusters),
('kdar_conflict_nodes', lambda c: c.kdar_conflict_nodes),
('kdar_conflict_clusters', lambda c: c.kdar_conflict_clusters),
('kdar_agree_nodes', lambda c: c.kdar_agree_nodes),
('kdar_agree_clusters', lambda c: c.kdar_agree_clusters),
('kdar_span_nodes', lambda c: c.kdar_span_nodes),
('new_kdar_assoc', lambda c: c.new_kdar_assoc),
('conflict_frac', lambda c: c.conflict_frac)]
for key, method in props:
obj_dict[key] = np.reshape(map(method, obj_grid.flat), X.shape)
with open(out_filen + '.pickle', 'wb') as f:
        pickle.dump(obj_dict, f)
    return obj_dict
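# Example parameter-grid workflow (illustrative sketch only; the limits and
# sample counts shown are simply the documented defaults):
#
# >>> obj_grid, X, Y = grid_calc(lims=[0.05, 0.2, 1, 4], points=[10, 10],
# ...                            out_filen='obj_grid')
# >>> reduce_obj_grid(filen='obj_grid', out_filen='obj_props')
# >>> # 'obj_props.pickle' now holds matrices such as 'conflict_frac' sampled
# >>> # over the (angle, velocity) search grid.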
def kdar_flag_region(out_filen='kdar_flags'):
"""
Write a DS9 region file with the KDA resolution flags at coordinates.
Parameters
----------
out_filen : str
Output regions filename with extension '.reg'
Returns
-------
all_lines : str
Text string for region file content for parsing.
"""
# Read in data
df = read_bgps_vel()
df = df.set_index('v210cnum')
dpdf = read_cat('bgps_v210_kdars')
dpdf = dpdf[dpdf['kdar'] != 'U']
# Preamble
all_lines = 'global color=green font="helvetica 10 normal" select=1 ' + \
'highlite=1 edit=1 move=1 delete=1 include=1 fixed=0\n'
all_lines += 'galactic\n'
all_colors = {'U': 'black', 'F': 'red', 'N': 'green', 'T': 'orange', 'O':
'magenta'}
# Plot strings
text_entry = 'text {l} {b} # text={lcb}{flag}{rcb} color={c}\n'
braces = {'lcb': '{', 'rcb': '}'}
for cnum, kdar in dpdf.values:
l = df.ix[cnum, 'glon_peak']
b = df.ix[cnum, 'glat_peak'] - 0.005
c = all_colors[kdar]
all_lines += text_entry.format(l=l, b=b, c=c, flag=kdar, **braces)
with open(out_filen + '.reg', 'w') as f:
f.write(all_lines)
return all_lines
| autocorr/besl | besl/ppv_group.py | Python | gpl-3.0 | 31,445 | [
"Gaussian"
] | d60204082456c97c20e2b108a8f6aea7b6f241c45350c830103b91c31fecbee5 |
def writeClassical(mdpclean,outdir,mdpf,nondimer,rcoulomb,ew_rtol,pme):
"""
Gromacs MDP file for the classical replica.
The classical replica has its own settings that differ from
those of the other delocalized replicas. A Dimer simulation with
a classical replica will thus have two .mdp files, one for the classical replica
and a different one for all the others.
Arguments:
mdpclean: List of the input .mdp file purged of the settings that will be overwritten.
    outdir: String containing the output directory
    mdpf: String. Contains the input .mdp filename; the output is written as
          mdp.0.mdp in outdir, following the usual basename.0.mdp convention
          for replica exchange in Gromacs.
    nondimer: Flag, True/False. If true, a separate non-dimerized group
          (NONDIM, e.g. explicit solvent left out of the Dimer) is included
          in the energy-group settings.
    rcoulomb: Coulomb cut-off radius written to the .mdp file.
    ew_rtol: Ewald relative tolerance (ew-rtol) written to the .mdp file.
    pme: Flag, True/False. If true the coulombtype is set to PME-User,
          otherwise to User.
"""
nms=mdpf.split(".")
nnm = nms[0:len(nms)-1]
nnm = reduce(lambda x,y : x+"."+y, nnm)
nnm = outdir+"mdp.0.mdp"
f = open(nnm,"w+")
for ln in mdpclean:
f.write(ln+"\n")
str1=""
str2=""
str3=""
str4="User"
if pme:
str4="PME-User"
if nondimer:
str1="NONDIM"
str2="NONDIM NONDIM INTF NONDIM"
str3="NONINT NONDIM"
fstr="""
; lines added by DIMERIZER
integrator=md
nstcalcenergy=1
vdw_type=user
coulombtype=%s ; can be either User or PME-User. Don't change here unless you also change tables.
rcoulomb= %s
ew-rtol= %s
cutoff-scheme=group
energygrps=NONINT INTF %s
energygrp_table=INTF INTF %s
energygrp-excl=NONINT NONINT NONINT INTF %s
""" % (str4,str(rcoulomb),str(ew_rtol),str1,str2,str3)
f.write(fstr)
def writeDimer(mdpclean,outdir,mdpf,nondimer,rcoulomb,ew_rtol,pme):
"""
Gromacs MDP file for the delocalized replicas.
This .mdp file is used for all the non-classical Dimer replicas. Only the
classical replica has a different mdp file.
Arguments:
mdpclean: List of the input .mdp file purged of the settings that will be overwritten.
    outdir: String containing the output directory
    mdpf: String. Contains the input .mdp filename; the output is written as
          mdp.1.mdp in outdir, following the usual basename.1.mdp convention
          for replica exchange in Gromacs.
    nondimer: Flag, True/False. If true, a separate non-dimerized group
          (NONDIM, e.g. explicit solvent left out of the Dimer) is included
          in the energy-group settings.
    rcoulomb: Coulomb cut-off radius written to the .mdp file.
    ew_rtol: Ewald relative tolerance (ew-rtol) written to the .mdp file.
    pme: Flag, True/False. If true the coulombtype is set to PME-User,
          otherwise to User.
"""
nms=mdpf.split(".")
nnm = nms[0:len(nms)-1]
nnm = reduce(lambda x,y : x+"."+y, nnm)
nnm = outdir+"mdp.1.mdp"
f = open(nnm,"w+")
for ln in mdpclean:
f.write(ln+"\n")
str1="INT1 INT2 NONINT"
if nondimer:
str1 = str1 + " NONDIM"
if pme:
str2="INT1 INT1 INT2 INT2"
str3="INT1 INT2 NONINT INT1 NONINT INT2"
if nondimer:
str2 = str2+" NONDIM NONDIM INT1 NONDIM INT2 NONDIM NONINT NONINT NONINT NONDIM"
else:
str2= "INT1 INT1 INT2 INT2"
str3= "INT1 INT2 NONINT INT1 NONINT INT2 NONINT NONINT"
if nondimer:
str2 = str2 + " NONDIM NONDIM INT1 NONDIM INT2 NONDIM"
str3 = str3 + " NONINT NONDIM"
str4="User"
if pme:
str4="PME-User"
fstr="""
; lines added by DIMERIZER
integrator=md
nstcalcenergy=1
vdw_type=user
coulombtype=%s ; can be either User or PME-User. Don't change here unless you also change tables.
rcoulomb= %s
ew-rtol= %s
cutoff-scheme=group
energygrps=%s
energygrp_table=%s
energygrp-excl=%s
""" % (str4,str(rcoulomb),str(ew_rtol),str1,str2,str3)
f.write(fstr)
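# Example usage (illustrative sketch only; the file names and numerical values
# below are hypothetical and not part of the original module):
#
# >>> mdpclean = ["dt = 0.002", "nsteps = 500000"]  # purged input .mdp lines
# >>> writeClassical(mdpclean, "out/", "md.mdp", nondimer=True,
# ...                rcoulomb=1.2, ew_rtol=1e-5, pme=True)
# >>> writeDimer(mdpclean, "out/", "md.mdp", nondimer=True,
# ...            rcoulomb=1.2, ew_rtol=1e-5, pme=True)
# >>> # -> writes out/mdp.0.mdp (classical replica) and out/mdp.1.mdp
# >>> #    (delocalized replicas) with the Dimer settings appended.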
| marckn/dimerizer | dimerizer/mdp/mdp_writer.py | Python | gpl-3.0 | 3,535 | [
"Gromacs"
] | 77614ac618de99363174463f6d9f2dc41c9f0714811de29c4a4d6c6fb7943043 |
# -*- coding: utf-8 -*-
u"""
Created on 2015-7-23
@author: cheng.li
"""
import unittest
import math
import copy
import tempfile
import pickle
import os
from PyFin.Math.Distributions import InverseCumulativeNormal
from PyFin.Math.Distributions import NormalDistribution
from PyFin.Math.Distributions import CumulativeNormalDistribution
average = 1.0
sigma = 2.0
def gaussian(x):
normFact = sigma * math.sqrt(2.0 * math.pi)
dx = x - average
return math.exp(-dx * dx / (2.0 * sigma * sigma)) / normFact
def gaussianDerivative(x):
normFact = sigma * sigma * sigma * math.sqrt(2.0 * math.pi)
dx = x - average
return -dx * math.exp(-dx * dx / (2.0 * sigma * sigma)) / normFact
class TestDistribution(unittest.TestCase):
def testNormal(self):
invCumStandardNormal = InverseCumulativeNormal()
check = invCumStandardNormal(0.5)
self.assertAlmostEqual(check, 0.0, 10, "inverse cumulative of the standard normal at 0.5 is {0:f}"
"\n instead of zero: something is wrong!".format(check))
normal = NormalDistribution(average, sigma)
cum = CumulativeNormalDistribution(average, sigma)
invCum = InverseCumulativeNormal(average, sigma)
invCumAcc = InverseCumulativeNormal(average, sigma, fullAccuracy=True)
numberOfStandardDeviation = 6
xMin = average - numberOfStandardDeviation * sigma
xMax = average + numberOfStandardDeviation * sigma
N = 100001
h = (xMax - xMin) / (N - 1)
x = [xMin + i * h for i in range(N)]
y = [gaussian(v) for v in x]
yd = [gaussianDerivative(v) for v in x]
temp = [normal(v) for v in x]
for i, (expected, calculated) in enumerate(zip(y, temp)):
self.assertAlmostEqual(expected, calculated, 15, "at index {0:d}\n"
"Expected: {1:f}\n"
"Calculated: {2:f}".format(i, expected, calculated))
temp = [cum(v) for v in x]
temp = [invCum(v) for v in temp]
for i, (expected, calculated) in enumerate(zip(x, temp)):
self.assertAlmostEqual(expected, calculated, 7, "at index {0:d}\n"
"Expected gaussian: {1:f}\n"
"Calculated Gaussian: {2:f}".format(i, expected,
calculated))
temp = [cum(v) for v in x]
temp = [invCumAcc(v) for v in temp]
for i, (expected, calculated) in enumerate(zip(x, temp)):
self.assertAlmostEqual(expected, calculated, 7, "at index {0:d}\n"
"Expected gaussian: {1:.9f}\n"
"Calculated Gaussian: {2:.9f}".format(i, expected,
calculated))
temp = [cum.derivative(v) for v in x]
for i, (expected, calculated) in enumerate(zip(y, temp)):
self.assertAlmostEqual(expected, calculated, 15, "at index {0:d}\n"
"Expected: {1:f}\n"
"Calculated: {2:f}".format(i, expected, calculated))
temp = [normal.derivative(v) for v in x]
for i, (expected, calculated) in enumerate(zip(yd, temp)):
self.assertAlmostEqual(expected, calculated, 15, "at index {0:d}\n"
"Expected: {1:f}\n"
"Calculated: {2:f}".format(i, expected, calculated))
# test nan value returning
self.assertTrue(math.isnan(invCum(-0.5)))
def testNormalDistributionCopy(self):
norm = NormalDistribution(average, sigma)
copied = copy.deepcopy(norm)
self.assertEqual(norm, copied)
def testNormalDistributionPickle(self):
benchmark_norm = NormalDistribution(average, sigma)
f = tempfile.NamedTemporaryFile('w+b', delete=False)
pickle.dump(benchmark_norm, f)
f.close()
with open(f.name, 'rb') as f2:
pickled_norm = pickle.load(f2)
self.assertEqual(benchmark_norm, pickled_norm)
os.unlink(f.name)
def testCumulativeNormalDistribution(self):
norm = CumulativeNormalDistribution(average, sigma)
copied = copy.deepcopy(norm)
self.assertEqual(norm, copied)
def testCumulativeNormalDistributionPickle(self):
benchmark_norm = CumulativeNormalDistribution(average, sigma)
f = tempfile.NamedTemporaryFile('w+b', delete=False)
pickle.dump(benchmark_norm, f)
f.close()
with open(f.name, 'rb') as f2:
pickled_norm = pickle.load(f2)
self.assertEqual(benchmark_norm, pickled_norm)
os.unlink(f.name)
def testInverseCumulativeNormal(self):
norm = InverseCumulativeNormal(average, sigma, True)
copied = copy.deepcopy(norm)
self.assertEqual(norm, copied)
def testInverseCumulativeNormalPickle(self):
benchmark_norm = InverseCumulativeNormal(average, sigma)
f = tempfile.NamedTemporaryFile('w+b', delete=False)
pickle.dump(benchmark_norm, f)
f.close()
with open(f.name, 'rb') as f2:
pickled_norm = pickle.load(f2)
self.assertEqual(benchmark_norm, pickled_norm) | wegamekinglc/Finance-Python | PyFin/tests/Math/Distributions/testDistribution.py | Python | mit | 5,716 | [
"Gaussian"
] | f63095c357e463e55a0ea5de8e8a938caec1bc8cb7c9897be3bef66a3ffc81ae |
import numpy as np
from Methods import *
class NaiveBayes(Predictor):
    """Gaussian naive Bayes classifier.
    Fits a per-class Gaussian (mean ``moment_1`` and variance ``moment_2``)
    to each feature and predicts the class with the largest product of the
    class prior and the per-feature likelihoods.
    """
def __init__(self):
self.class_probabilities = {}
self.probs = {}
@staticmethod
def moment_1(instances, i):
summation = 0
instance_count = 0
for instance in instances:
instance_count = instance_count + 1
summation = summation + instance.get_feature_vector()[i]
return float(summation) / instance_count
@staticmethod
def moment_2(instances, i, mean):
sum_standard_deviation = 0
instance_count = 0
for instance in instances:
instance_count = instance_count + 1
sum_standard_deviation = sum_standard_deviation + (instance.get_feature_vector()[i] - mean) ** 2
return sum_standard_deviation / float(len(instances) - 1)
def calc_feature_wise_probabilities(self, instance, label, pre_prob):
for i in instance.get_feature_vector().keys():
pre_prob[label] *= self.calc_prob(instance.get_feature_vector()[i],
self.probs[label][i])
@staticmethod
def calc_prob(value, mv):
mju_1, mju_2 = mv
gaussian = (1 / np.sqrt(2 * np.pi * mju_2)) * np.exp((-1 / (2 * mju_2)) * (value - mju_1) ** 2)
return gaussian
def separate_data(self, instances):
separated_data = {}
for instance in instances:
label = instance.get_label()
self.make_separation_decision(label, separated_data)
separated_data[label].append(instance)
self.class_probabilities[label] += 1
instance_count = instance.get_feature_vector().keys()
return separated_data, instance_count
def make_separation_decision(self, label, separated_data):
if label not in self.class_probabilities:
self.class_probabilities[label] = 0
self.probs[label] = {}
separated_data[label] = []
def train(self, instances):
separated_data, attributes_length = self.separate_data(instances)
for label in self.class_probabilities:
self.class_probabilities[label] = self.class_probabilities[label] / float(len(instances))
self.calc_moments(attributes_length, label, separated_data)
def calc_moments(self, attributes_length, label, separated_data):
for i in attributes_length:
mju_1 = self.moment_1(separated_data[label], i)
mju_2 = self.moment_2(separated_data[label], i, mju_1)
self.probs[label][i] = mju_1, mju_2
def predict(self, instance, cur_index=0):
pre_prob = {}
for label in self.class_probabilities:
pre_prob[label] = self.class_probabilities[label]
self.calc_feature_wise_probabilities(instance, label, pre_prob)
return max(pre_prob, key=pre_prob.get)
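# Example usage (illustrative sketch only; `Instance` objects are assumed to
# come from the accompanying Methods module and to expose get_label() plus
# get_feature_vector() returning a dict of feature index -> numeric value):
#
# >>> nb = NaiveBayes()
# >>> nb.train(training_instances)        # fits per-class Gaussian moments
# >>> predicted = nb.predict(test_instance)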
| thewickedaxe/Artificial-Intelligence---600.435 | HW-4/NaiveBayes.py | Python | gpl-3.0 | 2,886 | [
"Gaussian"
] | d074673560543b6bd1d8af710b6c2f35155a458ebf629b3c29a665f3715ef96a |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""Diffusion map --- :mod:`MDAnalysis.analysis.diffusionmap`
=====================================================================
:Authors: Eugen Hruska, John Detlefs
:Year: 2016
:Copyright: GNU Public License v2
This module contains the non-linear dimension reduction method diffusion map.
The eigenvectors of a diffusion matrix represent the 'collective coordinates'
of a molecule; the largest eigenvalues are the more dominant collective
coordinates. Assigning physical meaning to the 'collective coordinates' is a
fundamentally difficult problem. The time complexity of the diffusion map is
:math:`O(N^3)`, where N is the number of frames in the trajectory, and the in-memory
storage complexity is :math:`O(N^2)`. Instead of a single trajectory a sample of
protein structures can be used. The sample should be equilibrated, at least
locally. The order of the sampled structures in the trajectory is irrelevant.
The :ref:`Diffusion-Map-tutorial` shows how to use diffusion map for dimension
reduction.
More details about diffusion maps are in [deLaPorte1]_, [Lafon1]_ ,
[Ferguson1]_, and [Clementi1]_.
.. _Diffusion-Map-tutorial:
Diffusion Map tutorial
----------------------
The example uses files provided as part of the MDAnalysis test suite
(in the variables :data:`~MDAnalysis.tests.datafiles.PSF` and
:data:`~MDAnalysis.tests.datafiles.DCD`). This tutorial shows how to use the
Diffusion Map class.
First load all modules and test data
.. code-block:: python
import MDAnalysis as mda
import MDAnalysis.analysis.diffusionmap as diffusionmap
from MDAnalysis.tests.datafiles import PSF, DCD
Given a universe or atom group, we can create the diffusion matrix from that
trajectory using :class:`DiffusionMap` and decompose it to get the
corresponding eigenvalues and eigenvectors.
.. code-block:: python
u = mda.Universe(PSF,DCD)
We leave determination of the appropriate scale parameter epsilon to the
user; [Clementi1]_ uses a complex method involving the k-nearest-neighbors of a
trajectory frame, whereas others simply use a trial-and-error approach with
a constant epsilon. Currently, the constant epsilon method is implemented
by MDAnalysis.
.. code-block:: python
dmap = diffusionmap.DiffusionMap(u, select='backbone', epsilon=2)
dmap.run()
From here we can perform an embedding onto the k dominant eigenvectors. The
non-linearity of the map means there is no explicit relationship between the
lower dimensional space and our original trajectory. However, this is an
isometry (distance preserving map), which means that points close in the lower
dimensional space are close in the higher-dimensional space and vice versa.
In order to embed into the most relevant low-dimensional space, there should
exist some number of dominant eigenvectors, whose corresponding eigenvalues
diminish at a constant rate until falling off, this is referred to as a
spectral gap and should be somewhat apparent for a system at equilibrium with a
high number of frames.
.. code-block:: python
import matplotlib.pyplot as plt
f, ax = plt.subplots()
upper_limit = # some reasonably high number less than the n_eigenvectors
ax.plot(dmap.eigenvalues[:upper_limit])
ax.set(xlabel ='eigenvalue index', ylabel='eigenvalue')
plt.tight_layout()
From here we can transform into the diffusion space
.. code-block:: python
num_eigenvectors = # some number less than the number of frames after
# inspecting for the spectral gap
fit = dmap.transform(num_eigenvectors, time=1)
It can be difficult to interpret the data, and is left as a task
for the user. The `diffusion distance` between frames i and j is best
approximated by the euclidean distance between rows i and j of
self.diffusion_space.
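For example, a minimal sketch of that comparison (``i`` and ``j`` below are
illustrative frame indices and ``fit`` is the embedding computed above):
.. code-block:: python
    import numpy as np
    diffusion_distance = np.linalg.norm(fit[i] - fit[j])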
Classes
-------
.. autoclass:: DiffusionMap
.. autoclass:: DistanceMatrix
References
----------
If you use this Dimension Reduction method in a publication, please
cite [Lafon1]_.
If you choose the default metric, this module uses the fast QCP algorithm
[Theobald2005]_ to calculate the root mean square distance (RMSD) between two
coordinate sets (as implemented in
:func:`MDAnalysis.lib.qcprot.CalcRMSDRotationalMatrix`). When using this
module in published work please cite [Theobald2005]_.
.. [Lafon1] Coifman, Ronald R., Lafon, Stephane. Diffusion
maps. Appl. Comput. Harmon. Anal. 21, 5–30 (2006).
.. [deLaPorte1] J. de la Porte, B. M. Herbst, W. Hereman, S. J. van der Walt.
An Introduction to Diffusion Maps. In: The 19th Symposium of the
Pattern Recognition Association of South Africa (2008).
.. [Clementi1] Rohrdanz, M. A, Zheng, W, Maggioni, M, & Clementi, C.
Determination of reaction coordinates via locally scaled diffusion
map. J. Chem. Phys. 134, 124116 (2011).
.. [Ferguson1] Ferguson, A. L.; Panagiotopoulos, A. Z.; Kevrekidis, I. G.
Debenedetti, P. G. Nonlinear dimensionality reduction in molecular
simulation: The diffusion map approach Chem. Phys. Lett. 509, 1−11
(2011)
"""
import logging
import warnings
import numpy as np
from MDAnalysis.core.universe import Universe
from .rms import rmsd
from .base import AnalysisBase
logger = logging.getLogger("MDAnalysis.analysis.diffusionmap")
class DistanceMatrix(AnalysisBase):
"""Calculate the pairwise distance between each frame in a trajectory
using a given metric
A distance matrix can be initialized on its own and used as an
initialization argument in :class:`DiffusionMap`.
Parameters
----------
universe : `~MDAnalysis.core.universe.Universe`
The MD Trajectory for dimension reduction, remember that
computational cost of eigenvalue decomposition
scales at O(N^3) where N is the number of frames.
Cost can be reduced by increasing step interval or specifying a
start and stop value when calling :meth:`DistanceMatrix.run`.
select : str, optional
Any valid selection string for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms`
This selection of atoms is used to calculate the RMSD between
different frames. Water should be excluded.
metric : function, optional
Maps two numpy arrays to a float, is positive definite and
symmetric. The API for a metric requires that the arrays must have
equal length, and that the function should have weights as an
optional argument. Weights give each index value its own weight for
the metric calculation over the entire arrays. Default: metric is
set to rms.rmsd().
cutoff : float, optional
Specify a given cutoff for metric values to be considered equal,
            Default: 1E-5
weights : array, optional
Weights to be given to coordinates for metric calculation
verbose : bool, optional
Show detailed progress of the calculation if set to ``True``; the
default is ``False``.
Attributes
----------
atoms : `~MDAnalysis.core.groups.AtomGroup`
Selected atoms in trajectory subject to dimension reduction
results.dist_matrix : numpy.ndarray, (n_frames, n_frames)
Array of all possible ij metric distances between frames in trajectory.
This matrix is symmetric with zeros on the diagonal.
.. versionadded:: 2.0.0
dist_matrix : numpy.ndarray, (n_frames, n_frames)
.. deprecated:: 2.0.0
Will be removed in MDAnalysis 3.0.0. Please use
:attr:`results.dist_matrix` instead.
Example
-------
Often, a custom distance matrix could be useful for local
epsilon determination or other manipulations on the diffusion
map method. The :class:`DistanceMatrix` exists in
:mod:`~MDAnalysis.analysis.diffusionmap` and can be passed
as an initialization argument for :class:`DiffusionMap`.
.. code-block:: python
import MDAnalysis as mda
import MDAnalysis.analysis.diffusionmap as diffusionmap
from MDAnalysis.tests.datafiles import PSF, DCD
Now create the distance matrix and pass it as an argument to
:class:`DiffusionMap`.
u = mda.Universe(PSF,DCD)
dist_matrix = diffusionmap.DistanceMatrix(u, select='all')
dist_matrix.run()
dmap = diffusionmap.DiffusionMap(dist_matrix)
dmap.run()
.. versionchanged:: 1.0.0
``save()`` method has been removed. You can use ``np.save()`` on
:attr:`DistanceMatrix.results.dist_matrix` instead.
.. versionchanged:: 2.0.0
:attr:`dist_matrix` is now stored in a
:class:`MDAnalysis.analysis.base.Results` instance.
"""
    def __init__(self, universe, select='all', metric=rmsd, cutoff=1E-5,
weights=None, **kwargs):
# remember that this must be called before referencing self.n_frames
super(DistanceMatrix, self).__init__(universe.trajectory, **kwargs)
self.atoms = universe.select_atoms(select)
self._metric = metric
self._cutoff = cutoff
self._weights = weights
self._calculated = False
def _prepare(self):
self.results.dist_matrix = np.zeros((self.n_frames, self.n_frames))
def _single_frame(self):
iframe = self._ts.frame
i_ref = self.atoms.positions
# diagonal entries need not be calculated due to metric(x,x) == 0 in
# theory, _ts not updated properly. Possible savings by setting a
# cutoff for significant decimal places to sparsify matrix
for j, ts in enumerate(self._trajectory[iframe:self.stop:self.step]):
self._ts = ts
j_ref = self.atoms.positions
dist = self._metric(i_ref, j_ref, weights=self._weights)
self.results.dist_matrix[self._frame_index,
j+self._frame_index] = (
dist if dist > self._cutoff else 0)
self.results.dist_matrix[j+self._frame_index,
self._frame_index] = (
self.results.dist_matrix[self._frame_index,
j+self._frame_index])
self._ts = self._trajectory[iframe]
@property
def dist_matrix(self):
wmsg = ("The `dist_matrix` attribute was deprecated in "
"MDAnalysis 2.0.0 and will be removed in MDAnalysis 3.0.0. "
"Please use `results.dist_matrix` instead.")
warnings.warn(wmsg, DeprecationWarning)
return self.results.dist_matrix
def _conclude(self):
self._calculated = True
class DiffusionMap(object):
"""Non-linear dimension reduction method
Dimension reduction with diffusion mapping of selected structures in a
trajectory.
Attributes
----------
eigenvalues: array (n_frames,)
Eigenvalues of the diffusion map
Methods
-------
run()
Constructs an anisotropic diffusion kernel and performs eigenvalue
decomposition on it.
transform(n_eigenvectors, time)
Perform an embedding of a frame into the eigenvectors representing
the collective coordinates.
"""
def __init__(self, u, epsilon=1, **kwargs):
"""
Parameters
-------------
u : MDAnalysis Universe or DistanceMatrix object
Can be a Universe, in which case one must supply kwargs for the
initialization of a DistanceMatrix. Otherwise, this can be a
DistanceMatrix already initialized. Either way, this will be made
into a diffusion kernel.
epsilon : Float
Specifies the method used for the choice of scale parameter in the
diffusion map. More information in [Lafon1]_, [Ferguson1]_ and
[Clementi1]_, Default: 1.
**kwargs
Parameters to be passed for the initialization of a
:class:`DistanceMatrix`.
"""
if isinstance(u, Universe):
self._dist_matrix = DistanceMatrix(u, **kwargs)
elif isinstance(u, DistanceMatrix):
self._dist_matrix = u
else:
raise ValueError("U is not a Universe or DistanceMatrix and"
" so the DiffusionMap has no data to work with.")
self._epsilon = epsilon
def run(self, start=None, stop=None, step=None):
""" Create and decompose the diffusion matrix in preparation
for a diffusion map.
Parameters
----------
start : int, optional
start frame of analysis
stop : int, optional
stop frame of analysis
step : int, optional
number of frames to skip between each analysed frame
.. versionchanged:: 0.19.0
Added start/stop/step kwargs
"""
# run only if distance matrix not already calculated
if not self._dist_matrix._calculated:
self._dist_matrix.run(start=start, stop=stop, step=step)
# important for transform function and length of .run() method
self._n_frames = self._dist_matrix.n_frames
if self._n_frames > 5000:
warnings.warn("The distance matrix is very large, and can "
"be very slow to compute. Consider picking a larger "
"step size in distance matrix initialization.")
self._scaled_matrix = (self._dist_matrix.results.dist_matrix ** 2 /
self._epsilon)
# take negative exponent of scaled matrix to create Isotropic kernel
self._kernel = np.exp(-self._scaled_matrix)
D_inv = np.diag(1 / self._kernel.sum(1))
self._diff = np.dot(D_inv, self._kernel)
self._eigenvals, self._eigenvectors = np.linalg.eig(self._diff)
sort_idx = np.argsort(self._eigenvals)[::-1]
self.eigenvalues = self._eigenvals[sort_idx]
self._eigenvectors = self._eigenvectors[sort_idx]
self._calculated = True
return self
def transform(self, n_eigenvectors, time):
""" Embeds a trajectory via the diffusion map
Parameters
---------
n_eigenvectors : int
The number of dominant eigenvectors to be used for
diffusion mapping
time : float
Exponent that eigenvalues are raised to for embedding, for large
values, more dominant eigenvectors determine diffusion distance.
Return
------
diffusion_space : array (n_frames, n_eigenvectors)
The diffusion map embedding as defined by [Ferguson1]_.
"""
return (self._eigenvectors[1:n_eigenvectors+1,].T *
(self.eigenvalues[1:n_eigenvectors+1]**time))
| MDAnalysis/mdanalysis | package/MDAnalysis/analysis/diffusionmap.py | Python | gpl-2.0 | 15,865 | [
"MDAnalysis"
] | 1a00701eacad60588cfaa7c3d856247d20f73d4e4c44b76e568410a0120db88e |
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models, fields, _
class GroupVisitPaymentForm(models.AbstractModel):
_name = "cms.form.event.group.visit.payment"
_inherit = "cms.form.payment"
event_id = fields.Many2one("crm.event.compassion", readonly=False)
registration_id = fields.Many2one("event.registration", readonly=False)
partner_name = fields.Char("Participant", readonly=True)
@property
def _payment_redirect(self):
return f"/event/payment/gpv_payment_validate/{self.invoice_id.id}"
@property
def _form_fieldsets(self):
return [
{
"id": "payment",
"fields": ["partner_name"],
},
]
@property
def form_title(self):
if self.event_id:
return self.event_id.name + " " + _("payment")
else:
return _("Travel payment")
@property
def submit_text(self):
return _("Proceed with payment")
@property
def form_widgets(self):
# Hide fields
res = super().form_widgets
res["partner_name"] = "cms_form_compassion.form.widget.readonly"
return res
def form_init(self, request, main_object=None, **kw):
form = super().form_init(request, main_object, **kw)
# Store ambassador and event in model to use it in properties
registration = kw.get("registration")
if registration:
form.event_id = registration.compassion_event_id
form.partner_id = registration.partner_id
form.registration_id = registration
return form
def _form_load_partner_name(self, fname, field, value, **req_values):
return self.partner_id.sudo().name
def generate_invoice(self):
        # modify the open invoice: add a credit card fee line (1.9%) if not
        # already present
group_visit_invoice = self.registration_id.sudo().group_visit_invoice_id
# Admin
analytic_account = (
self.env["account.analytic.account"]
.sudo()
.search([("code", "=", "ATT_ADM")])
)
# Financial Expenses
account = self.env["account.account"].sudo().search([("code", "=", "4200")])
existing_tax = group_visit_invoice.invoice_line_ids.filtered(
lambda l: l.account_id == account
)
if not existing_tax:
group_visit_invoice.with_delay().modify_open_invoice(
{
"invoice_line_ids": [
(
0,
0,
{
"quantity": 1.0,
"price_unit": group_visit_invoice.amount_total
* 0.019,
"account_id": account.id,
"name": "Credit card tax",
"account_analytic_id": analytic_account.id,
},
)
]
}
)
return group_visit_invoice
| CompassionCH/compassion-switzerland | website_event_compassion/forms/group_visit_payment.py | Python | agpl-3.0 | 3,372 | [
"VisIt"
] | b1c1c74525a628a052a0910419427c0a85becc69ebcdef8909322edb76bf9121 |
"""
Test helper functions and base classes.
"""
import functools
import inspect
import json
import operator
import os
import pprint
import unittest
import urlparse
from contextlib import contextmanager
from datetime import datetime
from unittest import TestCase
import requests
from bok_choy.javascript import js_defined
from bok_choy.page_object import XSS_INJECTION
from bok_choy.promise import EmptyPromise, Promise
from bok_choy.web_app_test import WebAppTest
from opaque_keys.edx.locator import CourseLocator
from path import Path as path
from pymongo import ASCENDING, MongoClient
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common import BASE_URL
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from openedx.core.lib.tests.assertions.events import EventMatchTolerates, assert_event_matches, is_matching_event
from openedx.core.release import RELEASE_LINE, doc_version
from xmodule.partitions.partitions import UserPartition
MAX_EVENTS_IN_FAILURE_OUTPUT = 20
def skip_if_browser(browser):
"""
Method decorator that skips a test if browser is `browser`
Args:
browser (str): name of internet browser
Returns:
Decorated function
"""
def decorator(test_function):
@functools.wraps(test_function)
def wrapper(self, *args, **kwargs):
if self.browser.name == browser:
raise unittest.SkipTest('Skipping as this test will not work with {}'.format(browser))
test_function(self, *args, **kwargs)
return wrapper
return decorator
def is_youtube_available():
"""
Check if the required youtube urls are available.
If a URL in `youtube_api_urls` is not reachable then subsequent URLs will not be checked.
Returns:
bool:
"""
# TODO: Design and implement a better solution that is reliable and repeatable,
# reflects how the application works in production, and limits the third-party
# network traffic (e.g. repeatedly retrieving the js from youtube from the browser).
youtube_api_urls = {
'main': 'https://www.youtube.com/',
'player': 'https://www.youtube.com/iframe_api',
# For transcripts, you need to check an actual video, so we will
# just specify our default video and see if that one is available.
'transcript': 'http://video.google.com/timedtext?lang=en&v=3_yD_cEKoCk',
}
for url in youtube_api_urls.itervalues():
try:
response = requests.get(url, allow_redirects=False)
except requests.exceptions.ConnectionError:
return False
if response.status_code >= 300:
return False
return True
def is_focused_on_element(browser, selector):
"""
Check if the focus is on the element that matches the selector.
"""
return browser.execute_script("return $('{}').is(':focus')".format(selector))
def load_data_str(rel_path):
"""
Load a file from the "data" directory as a string.
`rel_path` is the path relative to the data directory.
"""
full_path = path(__file__).abspath().dirname() / "data" / rel_path
with open(full_path) as data_file:
return data_file.read()
def remove_file(filename):
"""
Remove a file if it exists
"""
if os.path.exists(filename):
os.remove(filename)
def disable_animations(page):
"""
Disable jQuery and CSS3 animations.
"""
disable_jquery_animations(page)
disable_css_animations(page)
def enable_animations(page):
"""
Enable jQuery and CSS3 animations.
"""
enable_jquery_animations(page)
enable_css_animations(page)
@js_defined('window.jQuery')
def disable_jquery_animations(page):
"""
Disable jQuery animations.
"""
page.browser.execute_script("jQuery.fx.off = true;")
@js_defined('window.jQuery')
def enable_jquery_animations(page):
"""
Enable jQuery animations.
"""
page.browser.execute_script("jQuery.fx.off = false;")
def disable_css_animations(page):
"""
Disable CSS3 animations, transitions, transforms.
"""
page.browser.execute_script("""
var id = 'no-transitions';
// if styles were already added, just do nothing.
if (document.getElementById(id)) {
return;
}
var css = [
'* {',
'-webkit-transition: none !important;',
'-moz-transition: none !important;',
'-o-transition: none !important;',
'-ms-transition: none !important;',
'transition: none !important;',
'-webkit-transition-property: none !important;',
'-moz-transition-property: none !important;',
'-o-transition-property: none !important;',
'-ms-transition-property: none !important;',
'transition-property: none !important;',
'-webkit-transform: none !important;',
'-moz-transform: none !important;',
'-o-transform: none !important;',
'-ms-transform: none !important;',
'transform: none !important;',
'-webkit-animation: none !important;',
'-moz-animation: none !important;',
'-o-animation: none !important;',
'-ms-animation: none !important;',
'animation: none !important;',
'}'
].join(''),
head = document.head || document.getElementsByTagName('head')[0],
styles = document.createElement('style');
styles.id = id;
styles.type = 'text/css';
if (styles.styleSheet){
styles.styleSheet.cssText = css;
} else {
styles.appendChild(document.createTextNode(css));
}
head.appendChild(styles);
""")
def enable_css_animations(page):
"""
Enable CSS3 animations, transitions, transforms.
"""
page.browser.execute_script("""
var styles = document.getElementById('no-transitions'),
head = document.head || document.getElementsByTagName('head')[0];
head.removeChild(styles)
""")
def select_option_by_text(select_browser_query, option_text, focus_out=False):
"""
Chooses an option within a select by text (helper method for Select's select_by_visible_text method).
Wrap this in a Promise to prevent a StaleElementReferenceException
from being raised while the DOM is still being rewritten
"""
def select_option(query, value):
""" Get the first select element that matches the query and select the desired value. """
try:
select = Select(query.first.results[0])
select.select_by_visible_text(value)
if focus_out:
query.first.results[0].send_keys(Keys.TAB)
return True
except StaleElementReferenceException:
return False
msg = 'Selected option {}'.format(option_text)
EmptyPromise(lambda: select_option(select_browser_query, option_text), msg).fulfill()
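# A minimal usage sketch (the selector and option text are illustrative, not from this
# codebase); focus_out=True tabs away so any change handlers fire:
#   select_option_by_text(page.q(css='select.chapter-select'), 'Chapter 2', focus_out=True)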
def get_selected_option_text(select_browser_query):
"""
Returns the text value for the first selected option within a select.
Wrap this in a Promise to prevent a StaleElementReferenceException
from being raised while the DOM is still being rewritten
"""
def get_option(query):
""" Get the first select element that matches the query and return its value. """
try:
select = Select(query.first.results[0])
return (True, select.first_selected_option.text)
except StaleElementReferenceException:
return (False, None)
text = Promise(lambda: get_option(select_browser_query), 'Retrieved selected option text').fulfill()
return text
def get_options(select_browser_query):
"""
Returns all the options for the given select.
"""
return Select(select_browser_query.first.results[0]).options
def generate_course_key(org, number, run):
"""
Makes a CourseLocator from org, number and run
"""
default_store = os.environ.get('DEFAULT_STORE', 'draft')
return CourseLocator(org, number, run, deprecated=(default_store == 'draft'))
def select_option_by_value(browser_query, value, focus_out=False):
"""
Selects a html select element by matching value attribute
"""
select = Select(browser_query.first.results[0])
select.select_by_value(value)
def options_selected():
"""
Returns True once every option whose value attribute equals `value` is
selected. If such an option is not yet selected, it is clicked and False is
returned; False is also returned when no option carries that value.
"""
all_options_selected = True
has_option = False
for opt in select.options:
if opt.get_attribute('value') == value:
has_option = True
if not opt.is_selected():
all_options_selected = False
opt.click()
if all_options_selected and not has_option:
all_options_selected = False
if focus_out:
browser_query.first.results[0].send_keys(Keys.TAB)
return all_options_selected
# Make sure specified option is actually selected
EmptyPromise(options_selected, "Option is selected").fulfill()
def is_option_value_selected(browser_query, value):
"""
return true if given value is selected in html select element, else return false.
"""
select = Select(browser_query.first.results[0])
ddl_selected_value = select.first_selected_option.get_attribute('value')
return ddl_selected_value == value
def element_has_text(page, css_selector, text):
"""
Return True if the given text is present among the texts of elements matching the CSS selector.
"""
text_present = False
text_list = page.q(css=css_selector).text
if len(text_list) > 0 and (text in text_list):
text_present = True
return text_present
def get_modal_alert(browser):
"""
Returns instance of modal alert box shown in browser after waiting
for 6 seconds
"""
WebDriverWait(browser, 6).until(EC.alert_is_present())
return browser.switch_to.alert
def get_element_padding(page, selector):
"""
Get Padding of the element with given selector,
:returns a dict object with the following keys.
1 - padding-top
2 - padding-right
3 - padding-bottom
4 - padding-left
Example Use:
progress_page.get_element_padding('.wrapper-msg.wrapper-auto-cert')
"""
js_script = """
var $element = $('%(selector)s');
element_padding = {
'padding-top': $element.css('padding-top').replace("px", ""),
'padding-right': $element.css('padding-right').replace("px", ""),
'padding-bottom': $element.css('padding-bottom').replace("px", ""),
'padding-left': $element.css('padding-left').replace("px", "")
};
return element_padding;
""" % {'selector': selector}
return page.browser.execute_script(js_script)
def is_404_page(browser):
""" Check if page is 404 """
return 'Page not found (404)' in browser.find_element_by_tag_name('h1').text
def create_multiple_choice_xml(correct_choice=2, num_choices=4):
"""
Return the Multiple Choice Problem XML, given the name of the problem.
"""
# all choices are incorrect except for correct_choice
choices = [False for _ in range(num_choices)]
choices[correct_choice] = True
choice_names = ['choice_{}'.format(index) for index in range(num_choices)]
question_text = 'The correct answer is Choice {}'.format(correct_choice)
return MultipleChoiceResponseXMLFactory().build_xml(
question_text=question_text,
choices=choices,
choice_names=choice_names,
)
def create_multiple_choice_problem(problem_name):
"""
Return the Multiple Choice Problem Descriptor, given the name of the problem.
"""
xml_data = create_multiple_choice_xml()
return XBlockFixtureDesc(
'problem',
problem_name,
data=xml_data,
metadata={'rerandomize': 'always'}
)
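# A minimal usage sketch (the problem name is illustrative): build a problem descriptor
# that a course fixture can attach as a child block:
#   problem_block = create_multiple_choice_problem('Test Problem')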
def auto_auth(browser, username, email, staff, course_id):
"""
Logout and login with given credentials.
"""
AutoAuthPage(browser, username=username, email=email, course_id=course_id, staff=staff).visit()
def assert_link(test, expected_link, actual_link):
"""
Assert that 'href' and text inside help DOM element are correct.
Arguments:
test: Test on which links are being tested.
expected_link (dict): The expected link attributes.
actual_link (dict): The actual link attribute on page.
"""
test.assertEqual(expected_link['href'], actual_link.get_attribute('href'))
test.assertEqual(expected_link['text'], actual_link.text)
def assert_opened_help_link_is_correct(test, url):
"""
Asserts that url of browser when help link is clicked is correct.
Arguments:
test (AcceptanceTest): test calling this method.
url (str): url to verify.
"""
test.browser.switch_to_window(test.browser.window_handles[-1])
# Assert that url in the browser is the same.
test.assertEqual(url, test.browser.current_url)
# Check that the URL loads. Can't do this in the browser because it might
# be loading a "Maze Found" missing content page.
response = requests.get(url)
test.assertEqual(response.status_code, 200, "URL {!r} returned {}".format(url, response.status_code))
EDX_BOOKS = {
'course_author': 'edx-partner-course-staff',
'learner': 'edx-guide-for-students',
}
OPEN_BOOKS = {
'course_author': 'open-edx-building-and-running-a-course',
'learner': 'open-edx-learner-guide',
}
def url_for_help(book_slug, path):
"""
Create a full help URL given a book slug and a path component.
"""
# Emulate the switch between books that happens in envs/bokchoy.py
books = EDX_BOOKS if RELEASE_LINE == "master" else OPEN_BOOKS
url = 'http://edx.readthedocs.io/projects/{}/en/{}{}'.format(books[book_slug], doc_version(), path)
return url
class EventsTestMixin(TestCase):
"""
Helpers and setup for running tests that evaluate events emitted
"""
def setUp(self):
super(EventsTestMixin, self).setUp()
self.event_collection = MongoClient()["test"]["events"]
self.start_time = datetime.now()
def reset_event_tracking(self):
"""Drop any events that have been collected thus far and start collecting again from scratch."""
self.event_collection.drop()
self.start_time = datetime.now()
@contextmanager
def capture_events(self, event_filter=None, number_of_matches=1, captured_events=None):
"""
Context manager that captures all events emitted while executing a particular block.
All captured events are stored in the list referenced by `captured_events`. Note that this list is appended to
*in place*. The events will be appended to the list in the order they are emitted.
The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
match that provided expectation.
`number_of_matches` tells this context manager when enough events have been found and it can move on. The
context manager will not exit until this many events have passed the filter. If not enough events are found
before a timeout expires, then this will raise a `BrokenPromise` error. Note that this simply states that
*at least* this many events have been emitted, so `number_of_matches` is simply a lower bound for the size of
`captured_events`.
"""
start_time = datetime.utcnow()
yield
events = self.wait_for_events(
start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches)
if captured_events is not None and hasattr(captured_events, 'append') and callable(captured_events.append):
for event in events:
captured_events.append(event)
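# A usage sketch (the event type and selector are illustrative assumptions):
#   captured = []
#   with self.capture_events({'event_type': 'edx.ui.lms.link_clicked'}, captured_events=captured):
#       self.page.q(css='a.external-link').first.click()
#   # `captured` now holds at least one matching event dict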
@contextmanager
def assert_events_match_during(self, event_filter=None, expected_events=None, in_order=True):
"""
Context manager that ensures that events matching the `event_filter` and `expected_events` are emitted.
This context manager will filter out the event stream using the `event_filter` and wait for
`len(expected_events)` to match the filter.
It will then compare the events in order with their counterpart in `expected_events` to ensure they match the
more detailed assertion.
Typically `event_filter` will be an `event_type` filter and the `expected_events` list will contain more
detailed assertions.
"""
captured_events = []
with self.capture_events(event_filter, len(expected_events), captured_events):
yield
self.assert_events_match(expected_events, captured_events, in_order=in_order)
def wait_for_events(self, start_time=None, event_filter=None, number_of_matches=1, timeout=None):
"""
Wait for `number_of_matches` events to pass the `event_filter`.
By default, this will look at all events that have been emitted since the beginning of the setup of this mixin.
A custom `start_time` can be specified which will limit the events searched to only those emitted after that
time.
The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
match that provided expectation.
`number_of_matches` lets us know when enough events have been found and it can move on. The function will not
return until this many events have passed the filter. If not enough events are found before a timeout expires,
then this will raise a `BrokenPromise` error. Note that this simply states that *at least* this many events have
been emitted, so `number_of_matches` is simply a lower bound for the size of `captured_events`.
Specifying a custom `timeout` can allow you to extend the default 30 second timeout if necessary.
"""
if start_time is None:
start_time = self.start_time
if timeout is None:
timeout = 30
def check_for_matching_events():
"""Gather any events that have been emitted since `start_time`"""
return self.matching_events_were_emitted(
start_time=start_time,
event_filter=event_filter,
number_of_matches=number_of_matches
)
return Promise(
check_for_matching_events,
# This is a bit of a hack, Promise calls str(description), so I set the description to an object with a
# custom __str__ and have it do some intelligent stuff to generate a helpful error message.
CollectedEventsDescription(
'Waiting for {number_of_matches} events to match the filter:\n{event_filter}'.format(
number_of_matches=number_of_matches,
event_filter=self.event_filter_to_descriptive_string(event_filter),
),
functools.partial(self.get_matching_events_from_time, start_time=start_time, event_filter={})
),
timeout=timeout
).fulfill()
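# A usage sketch (the event type is an illustrative assumption): block until at least two
# matching events have been recorded since `start`:
#   events = self.wait_for_events(start_time=start,
#                                 event_filter={'event_type': 'problem_check'},
#                                 number_of_matches=2)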
def matching_events_were_emitted(self, start_time=None, event_filter=None, number_of_matches=1):
"""Return True if enough events have been emitted that pass the `event_filter` since `start_time`."""
matching_events = self.get_matching_events_from_time(start_time=start_time, event_filter=event_filter)
return len(matching_events) >= number_of_matches, matching_events
def get_matching_events_from_time(self, start_time=None, event_filter=None):
"""
Return a list of events that pass the `event_filter` and were emitted after `start_time`.
This function is used internally by most of the other assertions and convenience methods in this class.
The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
match that provided expectation.
"""
if start_time is None:
start_time = self.start_time
if isinstance(event_filter, dict):
event_filter = functools.partial(is_matching_event, event_filter)
elif not callable(event_filter):
raise ValueError(
'event_filter must either be a dict or a callable function with a single "event" parameter that '
'returns a boolean value.'
)
matching_events = []
cursor = self.event_collection.find(
{
"time": {
"$gte": start_time
}
}
).sort("time", ASCENDING)
for event in cursor:
matches = False
try:
# Mongo automatically assigns an _id to all events inserted into it. We strip it out here, since
# we don't care about it.
del event['_id']
if event_filter is not None:
# Typically we will be grabbing all events of a particular type, however, you can use arbitrary
# logic to identify the events that are of interest.
matches = event_filter(event)
except AssertionError:
# allow the filters to use "assert" to filter out events
continue
else:
if matches is None or matches:
matching_events.append(event)
return matching_events
def assert_matching_events_were_emitted(self, start_time=None, event_filter=None, number_of_matches=1):
"""Assert that at least `number_of_matches` events have passed the filter since `start_time`."""
description = CollectedEventsDescription(
'Not enough events match the filter:\n' + self.event_filter_to_descriptive_string(event_filter),
functools.partial(self.get_matching_events_from_time, start_time=start_time, event_filter={})
)
self.assertTrue(
self.matching_events_were_emitted(
start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches
),
description
)
def assert_no_matching_events_were_emitted(self, event_filter, start_time=None):
"""Assert that no events have passed the filter since `start_time`."""
matching_events = self.get_matching_events_from_time(start_time=start_time, event_filter=event_filter)
description = CollectedEventsDescription(
'Events unexpectedly matched the filter:\n' + self.event_filter_to_descriptive_string(event_filter),
lambda: matching_events
)
self.assertEquals(len(matching_events), 0, description)
def assert_events_match(self, expected_events, actual_events, in_order=True):
"""Assert that each actual event matches one of the expected events.
Args:
expected_events (List): a list of dicts representing the expected events.
actual_events (List): a list of dicts that were actually recorded.
in_order (bool): if True then the events must be in the same order (defaults to True).
"""
if in_order:
for expected_event, actual_event in zip(expected_events, actual_events):
assert_event_matches(
expected_event,
actual_event,
tolerate=EventMatchTolerates.lenient()
)
else:
for expected_event in expected_events:
actual_event = next(event for event in actual_events if is_matching_event(expected_event, event))
assert_event_matches(
expected_event,
actual_event or {},
tolerate=EventMatchTolerates.lenient()
)
def relative_path_to_absolute_uri(self, relative_path):
"""Return an aboslute URI given a relative path taking into account the test context."""
return urlparse.urljoin(BASE_URL, relative_path)
def event_filter_to_descriptive_string(self, event_filter):
"""Find the source code of the callable or pretty-print the dictionary"""
message = ''
if callable(event_filter):
file_name = '(unknown)'
try:
file_name = inspect.getsourcefile(event_filter)
except TypeError:
pass
try:
list_of_source_lines, line_no = inspect.getsourcelines(event_filter)
except IOError:
pass
else:
message = '{file_name}:{line_no}\n{hr}\n{event_filter}\n{hr}'.format(
event_filter=''.join(list_of_source_lines).rstrip(),
file_name=file_name,
line_no=line_no,
hr='-' * 20,
)
if not message:
message = '{hr}\n{event_filter}\n{hr}'.format(
event_filter=pprint.pformat(event_filter),
hr='-' * 20,
)
return message
class CollectedEventsDescription(object):
"""
Produce a clear error message when tests fail.
This class calls the provided `get_events_func` when converted to a string, and pretty prints the returned events.
"""
def __init__(self, description, get_events_func):
self.description = description
self.get_events_func = get_events_func
def __str__(self):
message_lines = [
self.description,
'Events:'
]
events = self.get_events_func()
events.sort(key=operator.itemgetter('time'), reverse=True)
for event in events[:MAX_EVENTS_IN_FAILURE_OUTPUT]:
message_lines.append(pprint.pformat(event))
if len(events) > MAX_EVENTS_IN_FAILURE_OUTPUT:
message_lines.append(
'Too many events to display, the remaining events were omitted. Run locally to diagnose.')
return '\n\n'.join(message_lines)
class AcceptanceTest(WebAppTest):
"""
The base class of all acceptance tests.
"""
def __init__(self, *args, **kwargs):
# Hack until we upgrade Firefox and install geckodriver in devstack and Jenkins
DesiredCapabilities.FIREFOX['marionette'] = False
super(AcceptanceTest, self).__init__(*args, **kwargs)
# Use long messages so that failures show actual and expected values
self.longMessage = True # pylint: disable=invalid-name
class UniqueCourseTest(AcceptanceTest):
"""
Test that provides a unique course ID.
"""
def setUp(self):
super(UniqueCourseTest, self).setUp()
self.course_info = {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run',
'display_name': 'Test Course' + XSS_INJECTION + self.unique_id
}
@property
def course_id(self):
"""
Returns the serialized course_key for the test
"""
# TODO - is there a better way to make this agnostic to the underlying default module store?
default_store = os.environ.get('DEFAULT_STORE', 'draft')
course_key = CourseLocator(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
deprecated=(default_store == 'draft')
)
return unicode(course_key)
class YouTubeConfigError(Exception):
"""
Error occurred while configuring YouTube Stub Server.
"""
pass
class YouTubeStubConfig(object):
"""
Configure YouTube Stub Server.
"""
PORT = 9080
URL = 'http://127.0.0.1:{}/'.format(PORT)
@classmethod
def configure(cls, config):
"""
Allow callers to configure the stub server using the /set_config URL.
Arguments:
config (dict): Configuration dictionary.
Raises:
YouTubeConfigError
"""
youtube_stub_config_url = cls.URL + 'set_config'
config_data = {param: json.dumps(value) for param, value in config.items()}
response = requests.put(youtube_stub_config_url, data=config_data)
if not response.ok:
raise YouTubeConfigError(
'YouTube Server Configuration Failed. URL {0}, Configuration Data: {1}, Status was {2}'.format(
youtube_stub_config_url, config, response.status_code))
@classmethod
def reset(cls):
"""
Reset YouTube Stub Server Configurations using the /del_config URL.
Raises:
YouTubeConfigError
"""
youtube_stub_config_url = cls.URL + 'del_config'
response = requests.delete(youtube_stub_config_url)
if not response.ok:
raise YouTubeConfigError(
'YouTube Server Configuration Failed. URL: {0} Status was {1}'.format(
youtube_stub_config_url, response.status_code))
@classmethod
def get_configuration(cls):
"""
Allow callers to get current stub server configuration.
Returns:
dict
"""
youtube_stub_config_url = cls.URL + 'get_config'
response = requests.get(youtube_stub_config_url)
if response.ok:
return json.loads(response.content)
else:
return {}
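# A usage sketch; the configuration key below is an assumption for illustration, not a
# documented stub option:
#   YouTubeStubConfig.configure({'time_to_response': 0.4})
#   ...
#   YouTubeStubConfig.reset()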
def create_user_partition_json(partition_id, name, description, groups, scheme="random"):
"""
Helper method to create user partition JSON. If scheme is not supplied, "random" is used.
"""
# All that is persisted about a scheme is its name.
class MockScheme(object):
name = scheme
return UserPartition(
partition_id, name, description, groups, MockScheme()
).to_json()
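# A usage sketch, assuming Group is importable from xmodule.partitions.partitions (it is
# not imported in this snippet):
#   partition = create_user_partition_json(
#       0, 'Experiment Partition', 'Partition for content experiments',
#       [Group(0, 'Group A'), Group(1, 'Group B')])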
def assert_nav_help_link(test, page, href, signed_in=True, close_window=True):
"""
Asserts that help link in navigation bar is correct.
It first checks the url inside anchor DOM element and
then clicks to ensure that help opens correctly.
Arguments:
test (AcceptanceTest): Test object
page (PageObject): Page object to perform tests on.
href (str): The help link which we expect to see when it is opened.
signed_in (bool): Specifies whether user is logged in or not. (It affects the css)
close_window(bool): Close the newly-opened help window before continuing
"""
expected_link = {
'href': href,
'text': 'Help'
}
# Get actual anchor help element from the page.
actual_link = page.get_nav_help_element_and_click_help(signed_in)
# Assert that 'href' and text are the same as expected.
assert_link(test, expected_link, actual_link)
# Assert that opened link is correct
assert_opened_help_link_is_correct(test, href)
# Close the help window if not kept open intentionally
if close_window:
close_help_window(page)
def assert_side_bar_help_link(test, page, href, help_text, as_list_item=False, index=-1, close_window=True):
"""
Asserts that help link in side bar is correct.
It first checks the url inside anchor DOM element and
then clicks to ensure that help opens correctly.
Arguments:
test (AcceptanceTest): Test object
page (PageObject): Page object to perform tests on.
href (str): The help link which we expect to see when it is opened.
as_list_item (bool): Specifies whether help element is in one of the
'li' inside a sidebar list DOM element.
index (int): The index of element in case there are more than
one matching elements.
close_window(bool): Close the newly-opened help window before continuing
"""
expected_link = {
'href': href,
'text': help_text
}
# Get actual anchor help element from the page.
actual_link = page.get_side_bar_help_element_and_click_help(as_list_item=as_list_item, index=index)
# Assert that 'href' and text are the same as expected.
assert_link(test, expected_link, actual_link)
# Assert that opened link is correct
assert_opened_help_link_is_correct(test, href)
# Close the help window if not kept open intentionally
if close_window:
close_help_window(page)
def close_help_window(page):
"""
Closes the help window
Args:
page (PageObject): Page object to perform tests on.
"""
browser_url = page.browser.current_url
if browser_url.startswith('https://edx.readthedocs.io') or browser_url.startswith('http://edx.readthedocs.io'):
page.browser.close() # close only the current window
page.browser.switch_to_window(page.browser.window_handles[0])
class TestWithSearchIndexMixin(object):
""" Mixin encapsulating search index creation """
TEST_INDEX_FILENAME = "test_root/index_file.dat"
def _create_search_index(self):
""" Creates search index backing file """
with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
json.dump({}, index_file)
def _cleanup_index_file(self):
""" Removes search index backing file """
remove_file(self.TEST_INDEX_FILENAME)
| fintech-circle/edx-platform | common/test/acceptance/tests/helpers.py | Python | agpl-3.0 | 34,563 | [
"VisIt"
] | 0fcf8ec22a354bc85e972d0ceabb65875ebd14f862fceef5caed273df86a3da0 |
#
# Copyright (C) 2002 greg Landrum and Rational Discovery LLC
#
""" generates license files for our primitive license handler
"""
from rdkit.utils import Licensing
import sha,base64,time,StringIO
import sys
_salt = Licensing._salt
def DataFromTextDate(dText,mods=None):
""" expected format: day-month-year
"""
splitD= dText.split('-')
if len(splitD)!=3:
sys.stderr.write('ERROR: date format is day-month-year\n')
sys.exit(0)
dateComponents = map(int,splitD)
day,month,year = dateComponents
if month > 12:
sys.stderr.write('ERROR: date format is day-month-year\n')
sys.exit(0)
dVal = int(time.mktime((year,month,day,
0,0,0,
0,0,0)))
digest = sha.new(base64.decodestring(_salt))
digest.update(dText)
digest.update(str(dVal))
if not mods:
res = """Expiration_Date: %s
Verification: %s
"""%(dText,digest.hexdigest())
else:
digest.update(mods.upper())
res = """Expiration_Date: %s
Modules: %s
Verification: %s
"""%(dText,mods,digest.hexdigest())
return res
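# A minimal sketch of the expected call (the module names are illustrative):
#   print DataFromTextDate('31-12-2003', 'ML,DESCRIPTORS')
# which emits an Expiration_Date / Modules / Verification block.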
if __name__ == '__main__':
import sys
d = sys.argv[1]
if len(sys.argv)>2:
mods = ','.join([x.strip() for x in sys.argv[2:]])
else:
mods = None
print DataFromTextDate(d,mods)
| rdkit/rdkit-orig | rdkit/utils/GenLicense.py | Python | bsd-3-clause | 1,289 | [
"RDKit"
] | bf53f06c3e85c758230bdd9a8c41cf1a3a956d62a9326bd9ff1f56668cc6f1ac |
""" Example smearing script
This script:
* Reads in mc spectra from hdf5
* Smears spectra, default is to use weighted Gaussian method, but can
also use specify random Gaussian method via command line
* Smeared spectrum is saved to the same directory with ``_smeared``
added to the file name
Examples:
To smear hdf5 file ``example.hdf5`` using the random Gaussian method::
$ python dump_smeared.py --smear_method "random" /path/to/example.hdf5
This will create the smeared hdf5 file ``/path/to/example_smeared.hdf5``.
.. note:: Valid smear methods include:
* "weight", default
* "random"
"""
import echidna.output.store as store
import echidna.core.smear as smear
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--smear_method", nargs='?', const="weight",
type=str, default="weight",
help="specify the smearing method to use")
parser.add_argument("path", type=str,
help="specify path to hdf5 file")
args = parser.parse_args()
directory = args.path[:args.path.rfind("/")+1] # strip filename
# strip directory and extension
filename = args.path[args.path.rfind("/")+1:args.path.rfind(".")]
smearer = smear.Smear()
spectrum = store.load(args.path)
if args.smear_method == "weight": # Use default smear method
smeared_spectrum = smearer.weight_gaussian_energy_spectra(spectrum)
smeared_spectrum = smearer.weight_gaussian_radius_spectra(smeared_spectrum)
elif args.smear_method == "random":
smeared_spectrum = smearer.random_gaussian_energy_spectra(spectrum)
smeared_spectrum = smearer.random_gaussian_radius_spectra(smeared_spectrum)
else: # Not a valid smear method
parser.error(args.smear_method + " is not a valid smear method")
filename = directory + filename + "_smeared" + ".hdf5"
store.dump(filename, smeared_spectrum)
| EdLeming/echidna | echidna/scripts/dump_smeared.py | Python | mit | 1,992 | [
"Gaussian"
] | 0a3706c48cf6119ffdf903f4e41370fe4fe6d4e0151a94477d165eaa2b1370a7 |
import os
from django.test import TestCase
from django.test.client import Client
from hq.models import ExtUser, Domain
from buildmanager.models import Project, ProjectBuild, BuildDownload
from buildmanager.tests.util import setup_build_objects
from datetime import datetime
class ViewsTestCase(TestCase):
def setUp(self):
domain, user, project, build = setup_build_objects()
self.domain = domain
self.user = user
self.project = project
self.build = build
self.client.login(username='brian',password='test')
def testDownloadCount(self):
# really basic test, hit the jad, check the counts, hit the jar, check the counts
self.assertEquals(0, len(BuildDownload.objects.all()))
self.assertEquals(0, len(self.build.downloads.all()))
self.assertEquals(0, len(self.project.downloads.all()))
response = self.client.get('/projects/%s/latest/%s' % (self.project.id, "dummy.jar"))
self.assertEquals(1, len(BuildDownload.objects.all()))
self.assertEquals(1, len(self.build.downloads.all()))
self.assertEquals(1, len(self.project.downloads.all()))
response = self.client.get('/projects/%s/latest/%s' % (self.project.id, "dummy.jad"))
self.assertEquals(2, len(BuildDownload.objects.all()))
self.assertEquals(2, len(self.build.downloads.all()))
self.assertEquals(2, len(self.project.downloads.all()))
self.assertEquals(1, len(BuildDownload.objects.filter(type="jad")))
self.assertEquals(1, len(BuildDownload.objects.filter(type="jar")))
def testBasicViews(self):
project = Project.objects.all()[0]
build = ProjectBuild.objects.all()[0]
response = self.client.get('/projects/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/projects/%s/' % project.id)
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/projects/%s/latest/' % project.id)
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
# TODO - fix this
"""
response = self.client.get('/projects/%s/latest/%s' % (formdef.id, filename))
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
"""
response = self.client.get('/builds/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
# TODO - fix
"""
response = self.client.get('/builds/%s/%s/%s' % project.id, build_number, filename)
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
"""
response = self.client.get('/builds/show/%s/' % build.id)
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
def tearDown(self):
user = ExtUser.objects.get(username='brian')
user.delete()
domain = Domain.objects.get(name='mockdomain')
domain.delete()
| commtrack/commtrack-old-to-del | apps/buildmanager/tests/views.py | Python | bsd-3-clause | 3,463 | [
"Brian"
] | ae0038c99b684da58286bcda479565646f9867d2ce9c3634d6a9d6b17dd364f8 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module defines the FeffInputSet abstract base class and a concrete
implementation for the Materials Project. The basic concept behind an input
set is to specify a scheme to generate a consistent set of Feff inputs from a
structure without further user intervention. This ensures comparability across
runs.
"""
import sys
import os
import abc
import six
from copy import deepcopy
import logging
from monty.serialization import loadfn
from monty.json import MSONable
from pymatgen.io.feff.inputs import Atoms, Tags, Potential, Header
__author__ = "Kiran Mathew"
__credits__ = "Alan Dozier, Anubhav Jain, Shyue Ping Ong"
__version__ = "1.1"
__maintainer__ = "Kiran Mathew"
__email__ = "[email protected]"
__date__ = "Sept 10, 2016"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s: %(levelname)s: %(name)s: %(message)s')
sh = logging.StreamHandler(stream=sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(sh)
class AbstractFeffInputSet(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
Abstract base class representing a set of Feff input parameters.
The idea is that using a FeffInputSet, a complete set of input files
(feffPOT, feffXANES, feffEXAFS, ATOMS, feff.inp)
can be generated in an automated fashion for any structure.
"""
@abc.abstractmethod
def header(self):
"""
Returns header to be used in feff.inp file from a pymatgen structure
"""
pass
@abc.abstractproperty
def atoms(self):
"""
Returns Atoms string from a structure that goes in feff.inp file.
Returns:
Atoms object.
"""
pass
@abc.abstractproperty
def tags(self):
"""
Returns standard calculation parameters.
"""
return
@abc.abstractproperty
def potential(self):
"""
Returns POTENTIAL section used in feff.inp from a structure.
"""
pass
def all_input(self):
"""
Returns all input files as a dict of {filename: feffio object}
"""
d = {"HEADER": self.header(), "PARAMETERS": self.tags}
if "RECIPROCAL" not in self.tags:
d.update({"POTENTIALS": self.potential, "ATOMS": self.atoms})
return d
def write_input(self, output_dir=".", make_dir_if_not_present=True):
"""
Writes a set of FEFF input to a directory.
Args:
output_dir: Directory to output the FEFF input files
make_dir_if_not_present: Set to True if you want the directory (
and the whole path) to be created if it is not present.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
feff = self.all_input()
feff_input = "\n\n".join(str(feff[k]) for k in
["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"]
if k in feff)
for k, v in six.iteritems(feff):
with open(os.path.join(output_dir, k), "w") as f:
f.write(str(v))
with open(os.path.join(output_dir, "feff.inp"), "w") as f:
f.write(feff_input)
# write the structure to cif file
if "ATOMS" not in feff:
self.atoms.struct.to(fmt="cif",
filename=os.path.join(
output_dir, feff["PARAMETERS"]["CIF"]))
class FEFFDictSet(AbstractFeffInputSet):
"""
Standard implementation of FeffInputSet, which can be extended by specific
implementations.
"""
def __init__(self, absorbing_atom, structure, radius, config_dict,
edge="K", spectrum="EXAFS", nkpts=1000, user_tag_settings=None):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
radius (float): cluster radius
config_dict (dict): control tag settings dict
edge (str): absorption edge
spectrum (str): type of spectrum to calculate, available options :
EXAFS, XANES, DANES, XMCD, ELNES, EXELFS, FPRIME, NRIXS, XES.
The default is EXAFS.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings. To delete
tags, set the key '_del' in the user_tag_settings.
eg: user_tag_settings={"_del": ["COREHOLE", "EXCHANGE"]}
"""
self.absorbing_atom = absorbing_atom
self.structure = structure
self.radius = radius
self.config_dict = deepcopy(config_dict)
self.edge = edge
self.spectrum = spectrum
self.nkpts = nkpts
self.user_tag_settings = user_tag_settings or {}
self.config_dict["EDGE"] = self.edge
self.config_dict.update(self.user_tag_settings)
if "_del" in self.user_tag_settings:
for tag in self.user_tag_settings["_del"]:
if tag in self.config_dict:
del self.config_dict[tag]
del self.config_dict["_del"]
# k-space feff only for small systems. The hardcoded system size in
# feff is around 14 atoms.
self.small_system = True if len(self.structure) < 14 else False
def header(self, source='', comment=''):
"""
Creates header string from structure object
Args:
source: Source identifier used to create structure, can be defined
however user wants to organize structures, calculations, etc.
example would be Materials Project material ID number.
comment: comment to include in header
Returns:
Header
"""
return Header(self.structure, source, comment)
@property
def tags(self):
"""
FEFF job parameters.
Returns:
Tags
"""
if "RECIPROCAL" in self.config_dict:
if self.small_system:
self.config_dict["CIF"] = "{}.cif".format(
self.structure.formula.replace(" ", ""))
self.config_dict["TARGET"] = self.atoms.center_index + 1
self.config_dict["COREHOLE"] = "RPA"
logger.warning("Setting COREHOLE = RPA for K-space calculation")
if not self.config_dict.get("KMESH", None):
abc = self.structure.lattice.abc
mult = (self.nkpts * abc[0] * abc[1] * abc[2]) ** (1 / 3)
self.config_dict["KMESH"] = [int(round(mult / l)) for l in abc]
else:
logger.warning("Large system(>=14 atoms), removing K-space settings")
del self.config_dict["RECIPROCAL"]
self.config_dict.pop("CIF", None)
self.config_dict.pop("TARGET", None)
self.config_dict.pop("KMESH", None)
self.config_dict.pop("STRFAC", None)
return Tags(self.config_dict)
@property
def potential(self):
"""
FEFF potential
Returns:
Potential
"""
return Potential(self.structure, self.absorbing_atom)
@property
def atoms(self):
"""
absorber + the rest
Returns:
Atoms
"""
return Atoms(self.structure, self.absorbing_atom, self.radius)
def __str__(self):
output = [self.spectrum]
output.extend(["%s = %s" % (k, str(v))
for k, v in six.iteritems(self.config_dict)])
output.append("")
return "\n".join(output)
class MPXANESSet(FEFFDictSet):
"""
FeffDictSet for XANES spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPXANESSet.yaml"))
def __init__(self, absorbing_atom, structure, edge="K", radius=10.,
nkpts=1000, user_tag_settings=None):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super(MPXANESSet, self).__init__(absorbing_atom, structure, radius,
MPXANESSet.CONFIG, edge=edge,
spectrum="XANES", nkpts=nkpts,
user_tag_settings=user_tag_settings)
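# A usage sketch, assuming `structure` is a pymatgen Structure loaded elsewhere (e.g. via
# Structure.from_file) and that site 0 is the absorber:
#   xanes_set = MPXANESSet(absorbing_atom=0, structure=structure, radius=10.0)
#   xanes_set.write_input(output_dir="feff_xanes")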
class MPEXAFSSet(FEFFDictSet):
"""
FeffDictSet for EXAFS spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPEXAFSSet.yaml"))
def __init__(self, absorbing_atom, structure, edge="K", radius=10.,
nkpts=1000, user_tag_settings=None):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super(MPEXAFSSet, self).__init__(absorbing_atom, structure, radius,
MPEXAFSSet.CONFIG, edge=edge,
spectrum="EXAFS", nkpts=nkpts,
user_tag_settings=user_tag_settings)
class MPEELSDictSet(FEFFDictSet):
"""
FeffDictSet for ELNES spectroscopy.
"""
def __init__(self, absorbing_atom, structure, edge, spectrum, radius,
beam_energy, beam_direction, collection_angle,
convergence_angle, config_dict, user_eels_settings=None,
nkpts=1000, user_tag_settings=None):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
spectrum (str): ELNES or EXELFS
radius (float): cluster radius in Angstroms.
beam_energy (float): Incident beam energy in keV
beam_direction (list): Incident beam direction. If None, the
cross section will be averaged.
collection_angle (float): Detector collection angle in mrad.
convergence_angle (float): Beam convergence angle in mrad.
user_eels_settings (dict): override default EELS config.
See MPELNESSet.yaml for supported keys.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
self.beam_energy = beam_energy
self.beam_direction = beam_direction
self.collection_angle = collection_angle
self.convergence_angle = convergence_angle
self.user_eels_settings = user_eels_settings
eels_config_dict = deepcopy(config_dict)
if beam_direction:
beam_energy_list = [beam_energy, 0, 1, 1]
eels_config_dict[spectrum]["BEAM_DIRECTION"] = beam_direction
else:
beam_energy_list = [beam_energy, 1, 0, 1]
del eels_config_dict[spectrum]["BEAM_DIRECTION"]
eels_config_dict[spectrum]["BEAM_ENERGY"] = beam_energy_list
eels_config_dict[spectrum]["ANGLES"] = [collection_angle,
convergence_angle]
if user_eels_settings:
eels_config_dict[spectrum].update(user_eels_settings)
super(MPEELSDictSet, self).__init__(absorbing_atom, structure, radius,
eels_config_dict, edge=edge,
spectrum=spectrum, nkpts=nkpts,
user_tag_settings=user_tag_settings)
class MPELNESSet(MPEELSDictSet):
"""
FeffDictSet for ELNES spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPELNESSet.yaml"))
def __init__(self, absorbing_atom, structure, edge="K", radius=10.,
beam_energy=100, beam_direction=None, collection_angle=1,
convergence_angle=1, user_eels_settings=None, nkpts=1000,
user_tag_settings=None):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
beam_energy (float): Incident beam energy in keV
beam_direction (list): Incident beam direction. If None, the
cross section will be averaged.
collection_angle (float): Detector collection angle in mrad.
convergence_angle (float): Beam convergence angle in mrad.
user_eels_settings (dict): override default EELS config.
See MPELNESSet.yaml for supported keys.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super(MPELNESSet, self).__init__(absorbing_atom, structure, edge,
"ELNES", radius, beam_energy,
beam_direction, collection_angle,
convergence_angle, MPELNESSet.CONFIG,
user_eels_settings=user_eels_settings,
nkpts=nkpts, user_tag_settings=user_tag_settings)
class MPEXELFSSet(MPEELSDictSet):
"""
FeffDictSet for EXELFS spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPEXELFSSet.yaml"))
def __init__(self, absorbing_atom, structure, edge="K", radius=10.,
beam_energy=100, beam_direction=None, collection_angle=1,
convergence_angle=1, user_eels_settings=None, nkpts=1000,
user_tag_settings=None):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
beam_energy (float): Incident beam energy in keV
beam_direction (list): Incident beam direction. If None, the
cross section will be averaged.
collection_angle (float): Detector collection angle in mrad.
convergence_angle (float): Beam convergence angle in mrad.
user_eels_settings (dict): override default EELS config.
See MPEXELFSSet.yaml for supported keys.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super(MPEXELFSSet, self).__init__(absorbing_atom, structure, edge,
"EXELFS", radius, beam_energy,
beam_direction, collection_angle,
convergence_angle, MPEXELFSSet.CONFIG,
user_eels_settings=user_eels_settings,
nkpts=nkpts, user_tag_settings=user_tag_settings)
| xhqu1981/pymatgen | pymatgen/io/feff/sets.py | Python | mit | 16,295 | [
"FEFF",
"pymatgen"
] | 60444c8f0510e8d52219c60a9e37e75a6ef8fa6ac3ec6f4dc99f275334ccc593 |
#!/usr/bin/python
####
# 02/2006 Will Holcomb <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# 7/26/07 Slightly modified by Brian Schneider
# in order to support unicode files ( multipart_encode function )
"""
Usage:
Enables the use of multipart/form-data for posting forms
Inspirations:
Upload files in python:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
urllib2_file:
Fabien Seisen: <[email protected]>
Example:
import MultipartPostHandler, urllib2, cookielib
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),
MultipartPostHandler.MultipartPostHandler)
params = { "username" : "bob", "password" : "riviera",
"file" : open("filename", "rb") }
opener.open("http://wwww.bobsite.com/upload/", params)
Further Example:
The main function of this file is a sample which downloads a page and
then uploads it to the W3C validator.
"""
import urllib
import urllib2
import mimetools, mimetypes
import os, stat
import sys  # used by the error path in MultipartPostHandler.http_request below
from cStringIO import StringIO
class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
# Controls how sequences are uncoded. If true, elements may be given multiple values by
# assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is not None and type(data) != str:
v_files = []
v_vars = []
try:
for(key, value) in data.items():
if type(value) == file:
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", traceback
if len(v_files) == 0:
data = urllib.urlencode(v_vars, doseq)
else:
boundary, data = self.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
if(request.has_header('Content-Type')
and request.get_header('Content-Type').find('multipart/form-data') != 0):
print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
request.add_unredirected_header('Content-Type', contenttype)
request.add_data(data)
return request
def multipart_encode(vars, files, boundary = None, buf = None):
if boundary is None:
boundary = mimetools.choose_boundary()
if buf is None:
buf = StringIO()
for(key, value) in vars:
buf.write('--%s\r\n' % boundary)
buf.write('Content-Disposition: form-data; name="%s"' % key)
buf.write('\r\n\r\n' + value + '\r\n')
for(key, fd) in files:
file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
filename = fd.name.split('/')[-1]
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
buf.write('--%s\r\n' % boundary)
buf.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
buf.write('Content-Type: %s\r\n' % contenttype)
# buffer += 'Content-Length: %s\r\n' % file_size
fd.seek(0)
buf.write('\r\n' + fd.read() + '\r\n')
buf.write('--' + boundary + '--\r\n\r\n')
buf = buf.getvalue()
return boundary, buf
multipart_encode = Callable(multipart_encode)
https_request = http_request
def main():
import tempfile, sys
validatorURL = "http://validator.w3.org/check"
opener = urllib2.build_opener(MultipartPostHandler)
def validateFile(url):
temp = tempfile.mkstemp(suffix=".html")
os.write(temp[0], opener.open(url).read())
params = { "ss" : "0", # show source
"doctype" : "Inline",
"uploaded_file" : open(temp[1], "rb") }
print opener.open(validatorURL, params).read()
os.remove(temp[1])
if len(sys.argv[1:]) > 0:
for arg in sys.argv[1:]:
validateFile(arg)
else:
validateFile("http://www.google.com")
if __name__=="__main__":
main()
| Miners/MinusAPI | nautilus-minus/minus_utils/multipart.py | Python | apache-2.0 | 5,044 | [
"Brian"
] | c4ac4c7cf13b7049afabc1610edea491b6ee344f0be5e2d272341443ecd25f28 |
"""SOM utils"""
# Adapted from:
# Vahid Moosavi 2015 05 12 09:04 pm
# [email protected]
# Chair For Computer Aided Architectural Design, ETH Zurich
# Future Cities Lab
# www.vahidmoosavi.com
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import numexpr as ne
from time import time
import scipy.spatial as spdist
import timeit
import sys
from joblib import Parallel, delayed
from joblib import load, dump
import tempfile
import shutil
import os
import itertools
from scipy.sparse import csr_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition import PCA
from sklearn import neighbors
from matplotlib.colors import LogNorm
from matplotlib import cm
import matplotlib
import pandas as pd
class SOM(object):
def __init__(self, name, Data, mapsize=None, norm_method='var', initmethod='pca', neigh='Guassian'):
"""
name and data; neigh selects the neighborhood function ('Bubble' or Gaussian)
"""
self.name = name
self.data_raw = Data
if norm_method == 'var':
Data = normalize(Data, method=norm_method)
self.data = Data
else:
self.data = Data
self.dim = Data.shape[1]
self.dlen = Data.shape[0]
self.set_topology(mapsize=mapsize)
self.set_algorithm(initmethod=initmethod)
self.calc_map_dist()
self.neigh = neigh
# Slow for large data sets
# self.set_data_labels()
# set SOM topology
def set_topology(self, mapsize=None, mapshape='planar', lattice='rect', mask=None, compname=None):
"""
all_mapshapes = ['planar','toroid','cylinder']
all_lattices = ['hexa','rect']
"""
self.mapshape = mapshape
self.lattice = lattice
# to set mask
if mask == None:
self.mask = np.ones([1, self.dim])
else:
self.mask = mask
# to set map size
if mapsize == None:
tmp = int(round(np.sqrt(self.dlen)))
self.nnodes = tmp
self.mapsize = [int(3. / 5 * self.nnodes), int(2. / 5 * self.nnodes)]
else:
if len(mapsize) == 2:
if np.min(mapsize) == 1:
self.mapsize = [1, np.max(mapsize)]
else:
self.mapsize = mapsize
elif len(mapsize) == 1:
s = int(mapsize[0] / 2)
self.mapsize = [1, mapsize[0]]
print('input was considered as the number of nodes')
print('map size is [{0},{1}]'.format(s, s))
self.nnodes = self.mapsize[0] * self.mapsize[1]
# to set component names
if compname == None:
try:
cc = list()
for i in range(0, self.dim):
cc.append('Variable-' + str(i + 1))
self.compname = np.asarray(cc)[np.newaxis, :]
except:
pass
print('no data yet: please first set training data to the SOM')
else:
try:
dim = getattr(self, 'dim')
if len(compname) == dim:
self.compname = np.asarray(compname)[np.newaxis, :]
else:
print('compname should have the same size')
except:
pass
print('no data yet: please first set training data to the SOM')
# Set labels of the training data
# it should be in the format of a list of strings
def set_data_labels(self, dlabel=None):
if dlabel == None:
try:
dlen = (getattr(self, 'dlen'))
cc = list()
for i in range(0, dlen):
cc.append('dlabel-' + str(i))
self.dlabel = np.asarray(cc)[:, np.newaxis]
except:
pass
print('no data yet: please first set training data to the SOM')
else:
try:
dlen = (getattr(self, 'dlen'))
if dlabel.shape == (1, dlen):
self.dlabel = dlabel.T # [:,np.newaxis]
elif dlabel.shape == (dlen, 1):
self.dlabel = dlabel
elif dlabel.shape == (dlen,):
self.dlabel = dlabel[:, np.newaxis]
else:
print('wrong label format')
except:
pass
print('no data yet: please first set training data to the SOM')
# calculating the grid distance, which will be called during the training steps
# currently just works for planar grids
def calc_map_dist(self):
cd = getattr(self, 'nnodes')
UD2 = np.zeros((cd, cd))
for i in range(cd):
UD2[i, :] = grid_dist(self, i).reshape(1, cd)
self.UD2 = UD2
def set_algorithm(self, initmethod='pca', algtype='batch', neighborhoodmethod='gaussian', alfatype='inv',
alfaini=.5, alfafinal=.005):
"""
initmethod = ['random', 'pca']
algos = ['seq','batch']
all_neigh = ['gaussian','manhatan','bubble','cut_gaussian','epanechicov' ]
alfa_types = ['linear','inv','power']
"""
self.initmethod = initmethod
self.algtype = algtype
self.alfaini = alfaini
self.alfafinal = alfafinal
self.neigh = neighborhoodmethod
###################################
# visualize map
def view_map(self, what='codebook', which_dim='all', pack='Yes', text_size=2.8, save='No', save_dir='empty',
grid='No', text='Yes', cmap='None', COL_SiZe=6):
mapsize = getattr(self, 'mapsize')
if np.min(mapsize) > 1:
if pack == 'No':
view_2d(self, text_size, which_dim=which_dim, what=what)
else:
# print 'hi'
view_2d_Pack(self, text_size, which_dim=which_dim, what=what, save=save, save_dir=save_dir, grid=grid,
text=text, CMAP=cmap, col_sz=COL_SiZe)
elif np.min(mapsize) == 1:
view_1d(self, text_size, which_dim=which_dim, what=what)
################################################################################
# Initialize map codebook: Weight vectors of SOM
def init_map(self):
dim = 0
n_nod = 0
if getattr(self, 'initmethod') == 'random':
# It produces random values in the range of min- max of each dimension based on a uniform distribution
mn = np.tile(np.min(getattr(self, 'data'), axis=0), (getattr(self, 'nnodes'), 1))
mx = np.tile(np.max(getattr(self, 'data'), axis=0), (getattr(self, 'nnodes'), 1))
setattr(self, 'codebook', mn + (mx - mn) * (np.random.rand(getattr(self, 'nnodes'), getattr(self, 'dim'))))
elif getattr(self, 'initmethod') == 'pca':
codebooktmp = lininit(self) # it is based on two largest eigenvalues of correlation matrix
setattr(self, 'codebook', codebooktmp)
else:
print('please select a correct initialization method')
print('set a correct one in SOM. current SOM.initmethod: ', getattr(self, 'initmethod'))
print("possible init methods:'random', 'pca'")
# Main loop of training
def train(self, trainlen=None, n_job=1, shared_memory='no', verbose='on'):
t0 = time()
data = getattr(self, 'data')
nnodes = getattr(self, 'nnodes')
dlen = getattr(self, 'dlen')
dim = getattr(self, 'dim')
mapsize = getattr(self, 'mapsize')
mem = np.log10(dlen * nnodes * dim)
# print 'data len is %d and data dimension is %d' % (dlen, dim)
# print 'map size is %d, %d' %(mapsize[0], mapsize[1])
# print 'array size in log10 scale' , mem
# print 'nomber of jobs in parallel: ', n_job
#######################################
# initialization
if verbose == 'on':
print()
print('initialization method = %s, initializing..' % getattr(self, 'initmethod'))
print()
t0 = time()
self.init_map()
if verbose == 'on':
print('initialization done in %f seconds' % round(time() - t0, 3))
########################################
# rough training
if verbose == 'on':
print()
batchtrain(self, njob=n_job, phase='rough', shared_memory='no', verbose=verbose)
if verbose == 'on':
print()
#######################################
# Finetuning
if verbose == 'on':
print()
batchtrain(self, njob=n_job, phase='finetune', shared_memory='no', verbose=verbose)
err = np.mean(getattr(self, 'bmu')[1])
if verbose == 'on':
# or verbose == 'off':
# print
ts = round(time() - t0, 3)
print()
print("Total time elapsed: %f secodns" % ts)
print("final quantization error: %f" % err)
if verbose == 'final':
# or verbose == 'off':
# print
ts = round(time() - t0, 3)
print()
print("Total time elapsed: %f secodns" % ts)
print("final quantization error: %f" % err)
# to project a data set to a trained SOM and find the index of bmu
# It is based on nearest neighborhood search module of scikitlearn, but it is not that fast.
def project_data(self, data):
codebook = getattr(self, 'codebook')
data_raw = getattr(self, 'data_raw')
clf = neighbors.KNeighborsClassifier(n_neighbors=1)
labels = np.arange(0, codebook.shape[0])
clf.fit(codebook, labels)
# the codebook values are all normalized
# we can normalize the input data based on mean and std of original data
data = normalize_by(data_raw, data, method='var')
# data = normalize(data, method='var')
# plt.hist(data[:,2])
Predicted_labels = clf.predict(data)
return Predicted_labels
def predict_by(self, data, Target, K=5, wt='distance'):
"""
Predict the `Target` column with KNN regression over the trained codebook; `wt` is 'uniform' or 'distance'.
"""
# here it is assumed that Target is the last column in the codebook
# and data has dim-1 columns
codebook = getattr(self, 'codebook')
data_raw = getattr(self, 'data_raw')
dim = codebook.shape[1]
ind = np.arange(0, dim)
indX = ind[ind != Target]
X = codebook[:, indX]
Y = codebook[:, Target]
n_neighbors = K
clf = neighbors.KNeighborsRegressor(n_neighbors, weights=wt)
clf.fit(X, Y)
# the codebook values are all normalized
# we can normalize the input data based on mean and std of original data
dimdata = data.shape[1]
if dimdata == dim:
data[:, Target] = 0  # zero out the target column; it is dropped again after normalization
data = normalize_by(data_raw, data, method='var')
data = data[:, indX]
elif dimdata == dim - 1:
data = normalize_by(data_raw[:, indX], data, method='var')
# data = normalize(data, method='var')
Predicted_values = clf.predict(data)
Predicted_values = denormalize_by(data_raw[:, Target], Predicted_values)
return Predicted_values
def predict(self, X_test, K=5, wt='distance'):
"""
Predict the last (target) column of the codebook for X_test with KNN regression; `wt` is 'uniform' or 'distance'.
"""
# Similar to SKlearn we assume that we have X_tr, Y_tr and X_test
# here it is assumed that Target is the last column in the codebook
# and data has dim-1 columns
codebook = getattr(self, 'codebook')
data_raw = getattr(self, 'data_raw')
dim = codebook.shape[1]
Target = data_raw.shape[1] - 1
X_train = codebook[:, :Target]
Y_train = codebook[:, Target]
n_neighbors = K
clf = neighbors.KNeighborsRegressor(n_neighbors, weights=wt)
clf.fit(X_train, Y_train)
# the codebook values are all normalized
# we can normalize the input data based on mean and std of original data
X_test = normalize_by(data_raw[:, :Target], X_test, method='var')
Predicted_values = clf.predict(X_test)
Predicted_values = denormalize_by(data_raw[:, Target], Predicted_values)
return Predicted_values
def find_K_nodes(self, data, K=5):
from sklearn.neighbors import NearestNeighbors
# we find the k most similar nodes to the input vector
codebook = getattr(self, 'codebook')
neigh = NearestNeighbors(n_neighbors=K)
neigh.fit(codebook)
data_raw = getattr(self, 'data_raw')
# the codebook values are all normalized
# we can normalize the input data based on mean and std of original data
data = normalize_by(data_raw, data, method='var')
return neigh.kneighbors(data)
def ind_to_xy(self, bm_ind):
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
# bmu should be an integer between 0 to no_nodes
out = np.zeros((bm_ind.shape[0], 3))
out[:, 2] = bm_ind
out[:, 0] = bm_ind / cols
out[:, 1] = bm_ind % cols
return out.astype(int)
def cluster(self, method='Kmeans', n_clusters=8):
import sklearn.cluster as clust
km = clust.KMeans(n_clusters=n_clusters)
labels = km.fit_predict(denormalize_by(self.data_raw, self.codebook, n_method='var'))
setattr(self, 'cluster_labels', labels)
return labels
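# A usage sketch (the cluster count is arbitrary): cluster the trained codebook and reuse
# the stored labels in hit_map_cluster_number():
#   labels = sm.cluster(n_clusters=6)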
def hit_map(self, data=None):
# First Step: show the hitmap of all the training data
# print 'None'
data_tr = getattr(self, 'data_raw')
proj = self.project_data(data_tr)
msz = getattr(self, 'mapsize')
coord = self.ind_to_xy(proj)
# this is not an appropriate way, but it works
coord[:, 0] = msz[0] - coord[:, 0]
###############################
fig = plt.figure(figsize=(msz[1] / 5, msz[0] / 5))
ax = fig.add_subplot(111)
ax.xaxis.set_ticks([i for i in range(0, msz[1])])
ax.yaxis.set_ticks([i for i in range(0, msz[0])])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.grid(True, linestyle='-', linewidth=.5)
a = plt.hist2d(coord[:, 1], coord[:, 0], bins=(msz[1], msz[0]), alpha=.0, norm=LogNorm(), cmap=cm.jet)
# clbar = plt.colorbar()
x = np.arange(.5, msz[1] + .5, 1)
y = np.arange(.5, msz[0] + .5, 1)
X, Y = np.meshgrid(x, y)
area = a[0].T * 12
plt.scatter(X, Y, s=area, alpha=0.2, c='b', marker='o', cmap='jet', linewidths=3, edgecolor='r')
plt.scatter(X, Y, s=area, alpha=0.9, c='None', marker='o', cmap='jet', linewidths=3, edgecolor='r')
plt.xlim(0, msz[1])
plt.ylim(0, msz[0])
if data is not None:
proj = self.project_data(data)
msz = getattr(self, 'mapsize')
coord = self.ind_to_xy(proj)
a = plt.hist2d(coord[:, 1], coord[:, 0], bins=(msz[1], msz[0]), alpha=.0, norm=LogNorm(), cmap=cm.jet)
# clbar = plt.colorbar()
x = np.arange(.5, msz[1] + .5, 1)
y = np.arange(.5, msz[0] + .5, 1)
X, Y = np.meshgrid(x, y)
area = a[0].T * 50
plt.scatter(X, Y, s=area, alpha=0.2, c='b', marker='o', cmap='jet', linewidths=3, edgecolor='r')
plt.scatter(X, Y, s=area, alpha=0.9, c='None', marker='o', cmap='jet', linewidths=3, edgecolor='r')
plt.xlim(0, msz[1])
plt.ylim(0, msz[0])
plt.show()
def hit_map_cluster_number(self, data=None):
if hasattr(self, 'cluster_labels'):
codebook = getattr(self, 'cluster_labels')
# print 'yesyy'
else:
print('clustering based on default parameters...')
codebook = self.cluster()
msz = getattr(self, 'mapsize')
fig = plt.figure(figsize=(msz[1] / 2.5, msz[0] / 2.5))
ax = fig.add_subplot(111)
# ax.xaxis.set_ticklabels([])
# ax.yaxis.set_ticklabels([])
# ax.grid(True,linestyle='-', linewidth=.5)
if data == None:
data_tr = getattr(self, 'data_raw')
proj = self.project_data(data_tr)
coord = self.ind_to_xy(proj)
cents = self.ind_to_xy(np.arange(0, msz[0] * msz[1]))
for i, txt in enumerate(codebook):
ax.annotate(txt, (cents[i, 1], cents[i, 0]), size=10, va="center")
if data != None:
proj = self.project_data(data)
coord = self.ind_to_xy(proj)
x = np.arange(.5, msz[1] + .5, 1)
y = np.arange(.5, msz[0] + .5, 1)
cents = self.ind_to_xy(proj)
# cents[:,1] = cents[:,1]+.2
# print cents.shape
label = codebook[proj]
for i, txt in enumerate(label):
ax.annotate(txt, (cents[i, 1], cents[i, 0]), size=10, va="center")
plt.imshow(codebook.reshape(msz[0], msz[1])[::], alpha=.5)
# plt.pcolor(codebook.reshape(msz[0],msz[1])[::-1],alpha=.5,cmap='jet')
plt.show()
return cents
def view_map_dot(self, colormap=None, cols=None, save='No', save_dir='', text_size=8):
if colormap == None:
colormap = plt.cm.get_cmap('jet_r')
data = self.data_raw
proj = self.project_data(data)
coords = self.ind_to_xy(proj)[:, :2]
fig = plt.figure()
if cols == None:
cols = 8
rows = data.shape[1] / cols + 1
for i in range(data.shape[1]):
plt.subplot(rows, cols, i + 1)
mn = data[:, i].min()
mx = data[:, i].max()
plt.scatter(coords[:, 1], self.mapsize[0] - 1 - coords[:, 0], c=data[:, i], vmax=mx, vmin=mn, s=180,
marker='.', edgecolor='None', cmap=colormap, alpha=1)
eps = .75
plt.xlim(0 - eps, self.mapsize[1] - 1 + eps)
plt.ylim(0 - eps, self.mapsize[0] - 1 + eps)
plt.axis('off')
plt.title(self.compname[0][i])
font = {'size': text_size}
plt.rc('font', **font)
plt.axis('on')
plt.xticks([])
plt.yticks([])
plt.tight_layout()
# plt.colorbar(sc,ticks=np.round(np.linspace(mn,mx,5),decimals=1),shrink=0.6)
plt.subplots_adjust(hspace=.16, wspace=.05)
fig.set_size_inches(20, 20)
if save == 'Yes':
if save_dir != 'empty':
fig.savefig(save_dir, transparent=False, dpi=200)
else:
add = '/Users/itadmin/Desktop/SOM_dot.png'
print('save directory: ', add)
fig.savefig(add, transparent=False, dpi=200)
def predict_Probability(self, data, Target, K=5):
# here it is assumed that Target is the last column in the codebook #and data has dim-1 columns
codebook = getattr(self, 'codebook')
data_raw = getattr(self, 'data_raw')
dim = codebook.shape[1]
ind = np.arange(0, dim)
indX = ind[ind != Target]
X = codebook[:, indX]
Y = codebook[:, Target]
n_neighbors = K
clf = neighbors.KNeighborsRegressor(n_neighbors, weights='distance')
clf.fit(X, Y)
# the codebook values are all normalized
# we can normalize the input data based on mean and std of original data
dimdata = data.shape[1]
if dimdata == dim:
data[:, Target] = 0  # zero the target column (the original '==' was a no-op comparison)
data = normalize_by(data_raw, data, method='var')
data = data[:, indX]
elif dimdata == dim - 1:
data = normalize_by(data_raw[:, indX], data, method='var')
# data = normalize(data, method='var')
weights, ind = clf.kneighbors(data, n_neighbors=K, return_distance=True)
weights = 1. / weights
sum_ = np.sum(weights, axis=1)
weights = weights / sum_[:, np.newaxis]
labels = np.sign(codebook[ind, Target])
labels[labels >= 0] = 1
# for positives
pos_prob = labels.copy()
pos_prob[pos_prob < 0] = 0
pos_prob = pos_prob * weights
pos_prob = np.sum(pos_prob, axis=1)[:, np.newaxis]
# for negatives
neg_prob = labels.copy()
neg_prob[neg_prob > 0] = 0
neg_prob = neg_prob * weights * -1
neg_prob = np.sum(neg_prob, axis=1)[:, np.newaxis]
# Predicted_values = clf.predict(data)
# Predicted_values = denormalize_by(data_raw[:,Target], Predicted_values)
return np.concatenate((pos_prob, neg_prob), axis=1)
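# The inverse-distance weighting used above, in isolation (a commented-out
# sketch with made-up numbers, not part of the class):
# d = np.array([[1.0, 2.0, 4.0]])           # distances to the K nearest nodes
# w = 1. / d                                # closer nodes get larger weights
# w = w / w.sum(axis=1)[:, np.newaxis]      # rows sum to 1 -> [[0.571, 0.286, 0.143]]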
def node_Activation(self, data, wt='distance', Target=None):
"""
uniform
"""
if Target == None:
codebook = getattr(self, 'codebook')
data_raw = getattr(self, 'data_raw')
clf = neighbors.KNeighborsClassifier(n_neighbors=getattr(self, 'nnodes'))
labels = np.arange(0, codebook.shape[0])
clf.fit(codebook, labels)
# the codebook values are all normalized
# we can normalize the input data based on mean and std of original data
data = normalize_by(data_raw, data, method='var')
weights, ind = clf.kneighbors(data)
##Softmax function
weights = 1. / weights
S_ = np.sum(np.exp(weights), axis=1)[:, np.newaxis]
weights = np.exp(weights) / S_
return weights, ind
#
def para_bmu_find(self, x, y, njb=1):
dlen = x.shape[0]
Y2 = None
Y2 = np.einsum('ij,ij->i', y, y)
bmu = None
b = None
# here it finds BMUs for chunk of data in parallel
t_temp = time()
b = Parallel(n_jobs=njb, pre_dispatch='3*n_jobs')(delayed(chunk_based_bmu_find) \
(self,
x[i * dlen // njb:min((i + 1) * dlen // njb, dlen)], y,
Y2) \
for i in range(njb))
# print 'bmu finding: %f seconds ' %round(time() - t_temp, 3)
t1 = time()
bmu = np.asarray(list(itertools.chain(*b))).T
# print 'bmu to array: %f seconds' %round(time() - t1, 3)
del b
return bmu
# First finds the Voronoi set of each node. It needs to calculate a smaller matrix.
# Much faster than the classic batch training algorithm; it is based on the algorithm
# implemented in the SOM Toolbox for Matlab by Helsinki University of Technology.
def update_codebook_voronoi(self, training_data, bmu, H, radius):
# bmu has shape of 2,dlen, where first row has bmuinds
# we construct ud2 from precomputed UD2 : ud2 = UD2[bmu[0,:]]
nnodes = getattr(self, 'nnodes')
dlen = getattr(self, 'dlen')
dim = getattr(self, 'dim')
New_Codebook = np.empty((nnodes, dim))
inds = bmu[0].astype(int)
# print 'bmu', bmu[0]
# fig = plt.hist(bmu[0],bins=100)
# plt.show()
row = inds
col = np.arange(dlen)
val = np.tile(1, dlen)
P = csr_matrix((val, (row, col)), shape=(nnodes, dlen))
S = np.empty((nnodes, dim))
S = P.dot(training_data)
# assert( S.shape == (nnodes, dim))
# assert( H.shape == (nnodes, nnodes))
# H has nnodes*nnodes and S has nnodes*dim ---> Nominator has nnodes*dim
# print Nom
Nom = np.empty((nnodes, nnodes))
Nom = H.T.dot(S)
# assert( Nom.shape == (nnodes, dim))
nV = np.empty((1, nnodes))
nV = P.sum(axis=1).reshape(1, nnodes)
# print 'nV', nV
# print 'H'
# print H
# assert(nV.shape == (1, nnodes))
Denom = np.empty((nnodes, 1))
Denom = nV.dot(H.T).reshape(nnodes, 1)
# print 'Denom'
# print Denom
# assert( Denom.shape == (nnodes, 1))
New_Codebook = np.divide(Nom, Denom)
# print 'codebook'
# print New_Codebook.sum(axis=1)
Nom = None
Denom = None
# assert (New_Codebook.shape == (nnodes,dim))
# setattr(som, 'codebook', New_Codebook)
return np.around(New_Codebook, decimals=6)
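# How the sparse matrix P is used above, on a toy example (commented out):
# each column of P has a single 1 in the row of that sample's BMU, so
# P.dot(training_data) sums the samples falling into each node's Voronoi set
# and P.sum(axis=1) counts them (csr_matrix as used in the method above).
# bmus = np.array([0, 2, 2, 1])                         # BMU index per sample
# P = csr_matrix((np.ones(4), (bmus, np.arange(4))), shape=(3, 4))
# X = np.arange(8).reshape(4, 2)
# per_node_sum = P.dot(X)                               # shape (nnodes, dim)
# per_node_count = np.asarray(P.sum(axis=1)).ravel()    # [1, 1, 2]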
# we will call this function in parallel for different number of jobs
def chunk_based_bmu_find(self, x, y, Y2):
dim = x.shape[1]
dlen = x.shape[0]
nnodes = y.shape[0]
bmu = np.empty((dlen, 2))
# it seems that small batches are really faster for large dlen:
# that is because of ddata in loops and n_jobs. for large data it slows down due to memory needs in parallel
blen = min(50, dlen)
i0 = 0;
d = None
t = time()
while i0 + 1 <= dlen:
Low = (i0)
High = min(dlen, i0 + blen)
i0 = i0 + blen
ddata = x[Low:High + 1]
d = np.dot(y, ddata.T)
d *= -2
d += Y2.reshape(nnodes, 1)
bmu[Low:High + 1, 0] = np.argmin(d, axis=0)
bmu[Low:High + 1, 1] = np.min(d, axis=0)
del ddata
d = None
return bmu
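# The trick used above, spelled out: for the squared Euclidean distance
# ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.y, the ||x||^2 term is the same for
# every codebook vector y, so it can be dropped when taking the argmin.
# A small self-contained check (commented out):
# x = np.random.rand(7, 3); y = np.random.rand(4, 3)
# full = ((x[:, None, :] - y[None, :, :]) ** 2).sum(-1)        # (7, 4)
# partial = -2 * x.dot(y.T) + np.einsum('ij,ij->i', y, y)      # drops ||x||^2
# assert (full.argmin(axis=1) == partial.argmin(axis=1)).all()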
# Batch training which is called for rough training as well as finetuning
def batchtrain(self, njob=1, phase=None, shared_memory='no', verbose='on'):
t0 = time()
nnodes = getattr(self, 'nnodes')
dlen = getattr(self, 'dlen')
dim = getattr(self, 'dim')
mapsize = getattr(self, 'mapsize')
#############################################
# setting the parameters
initmethod = getattr(self, 'initmethod')
mn = np.min(mapsize)
if mn == 1:
mpd = float(nnodes * 10) / float(dlen)
else:
mpd = float(nnodes) / float(dlen)
ms = max(mapsize[0], mapsize[1])
if mn == 1:
ms = ms / 2.
# Based on somtoolbox, Matlab
# case 'train', sTrain.trainlen = ceil(50*mpd);
# case 'rough', sTrain.trainlen = ceil(10*mpd);
# case 'finetune', sTrain.trainlen = ceil(40*mpd);
if phase == 'rough':
# training length
trainlen = int(np.ceil(30 * mpd))
# radius for updating
if initmethod == 'random':
radiusin = max(1, np.ceil(ms / 3.))
radiusfin = max(1, radiusin / 6.)
# radiusin = max(1, np.ceil(ms/1.))
# radiusfin = max(1, radiusin/2.)
elif initmethod == 'pca':
radiusin = max(1, np.ceil(ms / 8.))
radiusfin = max(1, radiusin / 4.)
elif phase == 'finetune':
# training length
# radius for updating
if initmethod == 'random':
trainlen = int(np.ceil(50 * mpd))
radiusin = max(1, ms / 12.) # from radius fin in rough training
radiusfin = max(1, radiusin / 25.)
# radiusin = max(1, ms/2.) #from radius fin in rough training
# radiusfin = max(1, radiusin/2.)
elif initmethod == 'pca':
trainlen = int(np.ceil(40 * mpd))
radiusin = max(1, np.ceil(ms / 8.) / 4)
radiusfin = 1 # max(1, ms/128)
radius = np.linspace(radiusin, radiusfin, trainlen)
##################################################
UD2 = getattr(self, 'UD2')
New_Codebook_V = np.empty((nnodes, dim))
New_Codebook_V = getattr(self, 'codebook')
# print 'data is in shared memory?', shared_memory
if shared_memory == 'yes':
data = getattr(self, 'data')
Data_folder = tempfile.mkdtemp()
data_name = os.path.join(Data_folder, 'data')
dump(data, data_name)
data = load(data_name, mmap_mode='r')
else:
data = getattr(self, 'data')
# X2 is part of euclidean distance (x-y)^2 = x^2 +y^2 - 2xy that we use for each data row in bmu finding.
# Since it is a fixed value we can skip it during bmu finding for each data point, but later we need it calculate quantification error
X2 = np.einsum('ij,ij->i', data, data)
if verbose == 'on':
print('%s training...' % phase)
print('radius_ini: %f , radius_final: %f, trainlen: %d' % (radiusin, radiusfin, trainlen))
neigh_func = getattr(self, 'neigh')
for i in range(trainlen):
if neigh_func == 'Guassian':
# in case of a Gaussian neighborhood (note: the attribute value is spelled 'Guassian' in this module)
H = np.exp(-1.0 * UD2 / (2.0 * radius[i] ** 2)).reshape(nnodes, nnodes)
if neigh_func == 'Bubble':
# in case of Bubble function
# print radius[i], UD2.shape
# print UD2
H = l(radius[i], np.sqrt(UD2.flatten())).reshape(nnodes, nnodes) + .000000000001
# print H
t1 = time()
bmu = None
bmu = self.para_bmu_find(data, New_Codebook_V, njb=njob)
if verbose == 'on':
print()
# updating the codebook
t2 = time()
New_Codebook_V = self.update_codebook_voronoi(data, bmu, H, radius)
# print 'updating nodes: ', round (time()- t2, 3)
if verbose == 'on':
print("epoch: %d ---> elapsed time: %f, quantization error: %f " % (
i + 1, round(time() - t1, 3), np.mean(np.sqrt(bmu[1] + X2))))
setattr(self, 'codebook', New_Codebook_V)
bmu[1] = np.sqrt(bmu[1] + X2)
setattr(self, 'bmu', bmu)
def grid_dist(self, bmu_ind):
"""
som and bmu_ind
depending on the lattice "hexa" or "rect" we have different grid distance
functions.
bmu_ind is a number between 0 and number of nodes-1. depending on the map size
bmu_coord will be calculated and then distance matrix in the map will be returned
"""
try:
lattice = getattr(self, 'lattice')
except:
lattice = 'hexa'
print('lattice not found! Lattice as hexa was set')
if lattice == 'rect':
return self.rect_dist(bmu_ind)
elif lattice == 'hexa':
try:
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
except:
rows = 0.
cols = 0.
pass
# needs to be implemented
print('to be implemented', rows, cols)
return np.zeros((rows, cols))
def rect_dist(self, bmu):
# the way we consider the list of nodes in a planar grid is that node0 is on top left corner,
# nodemapsz[1]-1 is top right corner and then it goes to the second row.
# no. of rows is map_size[0] and no. of cols is map_size[1]
try:
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
except:
pass
# bmu should be an integer between 0 to no_nodes
if 0 <= bmu <= (rows * cols):
c_bmu = int(bmu % cols)
r_bmu = int(bmu / cols)
else:
print('wrong bmu')
# calculating the grid distance
if np.logical_and(rows > 0, cols > 0):
r, c = np.arange(0, rows, 1)[:, np.newaxis], np.arange(0, cols, 1)
dist2 = (r - r_bmu) ** 2 + (c - c_bmu) ** 2
return dist2.ravel()
else:
print('please consider the above mentioned errors')
return np.zeros((rows, cols)).ravel()
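# Worked example of the layout described above (added comment): on a 2x3 map
# the node indices are laid out as
#   0 1 2
#   3 4 5
# so for bmu = 4 (row 1, column 1) the squared grid distances are
# som.rect_dist(4)   # -> array([2, 1, 2, 1, 0, 1]), assuming som.mapsize == (2, 3)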
def view_2d(self, text_size, which_dim='all', what='codebook'):
msz0, msz1 = getattr(self, 'mapsize')
if what == 'codebook':
if hasattr(self, 'codebook'):
codebook = getattr(self, 'codebook')
data_raw = getattr(self, 'data_raw')
codebook = denormalize_by(data_raw, codebook)
else:
print('first initialize codebook')
if which_dim == 'all':
dim = getattr(self, 'dim')
indtoshow = np.arange(0, dim).T
ratio = float(dim) / float(dim)
ratio = np.max((.35, ratio))
sH, sV = 16, 16 * ratio * 1
plt.figure(figsize=(sH, sV))
elif type(which_dim) == int:
dim = 1
indtoshow = np.zeros(1)
indtoshow[0] = int(which_dim)
sH, sV = 6, 6
plt.figure(figsize=(sH, sV))
elif type(which_dim) == list:
max_dim = codebook.shape[1]
dim = len(which_dim)
ratio = float(dim) / float(max_dim)
# print max_dim, dim, ratio
ratio = np.max((.35, ratio))
indtoshow = np.asarray(which_dim).T
sH, sV = 16, 16 * ratio * 1
plt.figure(figsize=(sH, sV))
no_row_in_plot = dim / 6 + 1 # 6 is arbitrarily selected
if no_row_in_plot <= 1:
no_col_in_plot = dim
else:
no_col_in_plot = 6
axisNum = 0
compname = getattr(self, 'compname')
norm = matplotlib.colors.Normalize(vmin=np.mean(codebook.flatten()) - 1 * np.std(codebook.flatten()),
vmax=np.mean(codebook.flatten()) + 1 * np.std(codebook.flatten()), clip=True)
while axisNum < dim:
axisNum += 1
ax = plt.subplot(no_row_in_plot, no_col_in_plot, axisNum)
ind = int(indtoshow[axisNum - 1])
mp = codebook[:, ind].reshape(msz0, msz1)
pl = plt.pcolor(mp[::-1], norm=norm)
# pl = plt.imshow(mp[::-1])
plt.title(compname[0][ind])
font = {'size': text_size * sH / no_col_in_plot}
plt.rc('font', **font)
plt.axis('off')
plt.axis([0, msz0, 0, msz1])
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.colorbar(pl)
plt.show()
def view_2d_Pack(self, text_size, which_dim='all', what='codebook', save='No', grid='Yes', save_dir='empty', text='Yes',
CMAP='None', col_sz=None):
import matplotlib.cm as cm
msz0, msz1 = getattr(self, 'mapsize')
if CMAP == 'None':
CMAP = cm.RdYlBu_r
if what == 'codebook':
if hasattr(self, 'codebook'):
codebook = getattr(self, 'codebook')
data_raw = getattr(self, 'data_raw')
codebook = denormalize_by(data_raw, codebook)
else:
print('first initialize codebook')
if which_dim == 'all':
dim = getattr(self, 'dim')
indtoshow = np.arange(0, dim).T
ratio = float(dim) / float(dim)
ratio = np.max((.35, ratio))
sH, sV = 16, 16 * ratio * 1
# plt.figure(figsize=(sH,sV))
elif type(which_dim) == int:
dim = 1
indtoshow = np.zeros(1)
indtoshow[0] = int(which_dim)
sH, sV = 6, 6
# plt.figure(figsize=(sH,sV))
elif type(which_dim) == list:
max_dim = codebook.shape[1]
dim = len(which_dim)
ratio = float(dim) / float(max_dim)
# print max_dim, dim, ratio
ratio = np.max((.35, ratio))
indtoshow = np.asarray(which_dim).T
sH, sV = 16, 16 * ratio * 1
# plt.figure(figsize=(sH,sV))
# plt.figure(figsize=(7,7))
no_row_in_plot = dim / col_sz + 1  # col_sz columns of subplots per row
if no_row_in_plot <= 1:
no_col_in_plot = dim
else:
no_col_in_plot = col_sz
axisNum = 0
compname = getattr(self, 'compname')
h = .1
w = .1
fig = plt.figure(figsize=(no_col_in_plot * 2.5 * (1 + w), no_row_in_plot * 2.5 * (1 + h)))
# print no_row_in_plot, no_col_in_plot
norm = matplotlib.colors.Normalize(vmin=np.median(codebook.flatten()) - 1.5 * np.std(codebook.flatten()),
vmax=np.median(codebook.flatten()) + 1.5 * np.std(codebook.flatten()),
clip=False)
DD = pd.Series(data=codebook.flatten()).describe(
percentiles=[.03, .05, .1, .25, .3, .4, .5, .6, .7, .8, .9, .95, .97])
norm = matplotlib.colors.Normalize(vmin=DD.ix['3%'], vmax=DD.ix['97%'], clip=False)
while axisNum < dim:
axisNum += 1
ax = fig.add_subplot(no_row_in_plot, no_col_in_plot, axisNum)
ind = int(indtoshow[axisNum - 1])
mp = codebook[:, ind].reshape(msz0, msz1)
if grid == 'Yes':
pl = plt.pcolor(mp[::-1])
elif grid == 'No':
plt.imshow(mp[::-1], norm=None, cmap=CMAP)
# plt.pcolor(mp[::-1])
plt.axis('off')
if text == 'Yes':
plt.title(compname[0][ind])
font = {'size': text_size}
plt.rc('font', **font)
plt.axis([0, msz0, 0, msz1])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.xaxis.set_ticks([i for i in range(0, msz1)])
ax.yaxis.set_ticks([i for i in range(0, msz0)])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.grid(True, linestyle='-', linewidth=0.5, color='k')
# plt.grid()
# plt.colorbar(pl)
# plt.tight_layout()
plt.subplots_adjust(hspace=h, wspace=w)
if what == 'cluster':
if hasattr(self, 'cluster_labels'):
codebook = getattr(self, 'cluster_labels')
else:
print('clustering based on default parameters...')
codebook = self.cluster()
h = .2
w = .001
fig = plt.figure(figsize=(msz0 / 2, msz1 / 2))
ax = fig.add_subplot(1, 1, 1)
mp = codebook[:].reshape(msz0, msz1)
if grid == 'Yes':
pl = plt.pcolor(mp[::-1])
elif grid == 'No':
plt.imshow(mp[::-1])
# plt.pcolor(mp[::-1])
plt.axis('off')
if text == 'Yes':
plt.title('clusters')
font = {'size': text_size}
plt.rc('font', **font)
plt.axis([0, msz0, 0, msz1])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.xaxis.set_ticks([i for i in range(0, msz1)])
ax.yaxis.set_ticks([i for i in range(0, msz0)])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.grid(True, linestyle='-', linewidth=0.5, color='k')
plt.subplots_adjust(hspace=h, wspace=w)
if save == 'Yes':
if save_dir != 'empty':
# print save_dir
fig.savefig(save_dir, bbox_inches='tight', transparent=False, dpi=200)
else:
# print save_dir
add = '/Users/itadmin/Desktop/SOM.png'
fig.savefig(add, bbox_inches='tight', transparent=False, dpi=200)
plt.close(fig)
def view_1d(self, text_size, which_dim='all', what='codebook'):
msz0, msz1 = getattr(self, 'mapsize')
if what == 'codebook':
if hasattr(self, 'codebook'):
codebook = getattr(self, 'codebook')
data_raw = getattr(self, 'data_raw')
codebook = denormalize_by(data_raw, codebook)
else:
print('first initialize codebook')
if which_dim == 'all':
dim = getattr(self, 'dim')
indtoshow = np.arange(0, dim).T
ratio = float(dim) / float(dim)
ratio = np.max((.35, ratio))
sH, sV = 16, 16 * ratio * 1
plt.figure(figsize=(sH, sV))
elif type(which_dim) == int:
dim = 1
indtoshow = np.zeros(1)
indtoshow[0] = int(which_dim)
sH, sV = 6, 6
plt.figure(figsize=(sH, sV))
elif type(which_dim) == list:
max_dim = codebook.shape[1]
dim = len(which_dim)
ratio = float(dim) / float(max_dim)
# print max_dim, dim, ratio
ratio = np.max((.35, ratio))
indtoshow = np.asarray(which_dim).T
sH, sV = 16, 16 * ratio * 1
plt.figure(figsize=(sH, sV))
no_row_in_plot = dim / 6 + 1 # 6 is arbitrarily selected
if no_row_in_plot <= 1:
no_col_in_plot = dim
else:
no_col_in_plot = 6
axisNum = 0
compname = getattr(self, 'compname')
while axisNum < dim:
axisNum += 1
ax = plt.subplot(no_row_in_plot, no_col_in_plot, axisNum)
ind = int(indtoshow[axisNum - 1])
mp = codebook[:, ind]
plt.plot(mp, '-k', linewidth=0.8)
# pl = plt.pcolor(mp[::-1])
plt.title(compname[0][ind])
font = {'size': text_size * sH / no_col_in_plot}
plt.rc('font', **font)
# plt.axis('off')
# plt.axis([0, msz0, 0, msz1])
# ax.set_yticklabels([])
# ax.set_xticklabels([])
# plt.colorbar(pl)
plt.show()
def lininit(self):
# X = U sigma W^T
# X^T X = W sigma^2 W^T
# T = X W = U sigma  (data transformed by the eigenvectors W; it can also be
# obtained by multiplying the PC matrix by the eigenvalues)
# Lower-rank approximations follow from keeping only a few eigenvectors:
# T(2) = U(2) sigma(2) = X W(2) ---> 2 is the number of selected eigenvectors
# The map is initialized from the first two eigenvalues and eigenvectors:
# each node is a linear combination of them, with coefficients running from
# -1 to 1 in each direction of the SOM grid.
# It should be noted that here X is the covariance matrix of the original data.
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
nnodes = getattr(self, 'nnodes')
if np.min(msize) > 1:
coord = np.zeros((nnodes, 2))
for i in range(0, nnodes):
coord[i, 0] = int(i / cols) # x
coord[i, 1] = int(i % cols) # y
mx = np.max(coord, axis=0)
mn = np.min(coord, axis=0)
coord = (coord - mn) / (mx - mn)
coord = (coord - .5) * 2
data = getattr(self, 'data')
me = np.mean(data, 0)
data = (data - me)
codebook = np.tile(me, (nnodes, 1))
pca = RandomizedPCA(n_components=2) # Randomized PCA is scalable
# pca = PCA(n_components=2)
pca.fit(data)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.sqrt(np.einsum('ij,ij->i', eigvec, eigvec))
eigvec = ((eigvec.T / norms) * eigval).T  # scale unit eigenvectors by their eigenvalues
for j in range(nnodes):
for i in range(eigvec.shape[0]):
codebook[j, :] = codebook[j, :] + coord[j, i] * eigvec[i, :]
return np.around(codebook, decimals=6)
elif np.min(msize) == 1:
coord = np.zeros((nnodes, 1))
for i in range(0, nnodes):
# coord[i,0] = int(i/cols) #x
coord[i, 0] = int(i % cols) # y
mx = np.max(coord, axis=0)
mn = np.min(coord, axis=0)
# print coord
coord = (coord - mn) / (mx - mn)
coord = (coord - .5) * 2
# print coord
data = getattr(self, 'data')
me = np.mean(data, 0)
data = (data - me)
codebook = np.tile(me, (nnodes, 1))
pca = RandomizedPCA(n_components=1) # Randomized PCA is scalable
# pca = PCA(n_components=2)
pca.fit(data)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.sqrt(np.einsum('ij,ij->i', eigvec, eigvec))
eigvec = ((eigvec.T / norms) * eigval).T  # scale unit eigenvectors by their eigenvalues
for j in range(nnodes):
for i in range(eigvec.shape[0]):
codebook[j, :] = codebook[j, :] + coord[j, i] * eigvec[i, :]
return np.around(codebook, decimals=6)
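# A standalone sketch of the PCA ("linear") initialization idea used in
# lininit above, kept commented out. It is written against scikit-learn's
# current PCA API (an assumption; the method above uses the older
# RandomizedPCA), with random data for illustration only.
# from sklearn.decomposition import PCA
# data = np.random.rand(500, 6)
# coord = np.array([[r, c] for r in range(10) for c in range(15)], dtype=float)
# coord = 2 * ((coord - coord.min(0)) / (coord.max(0) - coord.min(0)) - .5)
# pca = PCA(n_components=2).fit(data - data.mean(0))
# eigvec = (pca.components_.T * pca.explained_variance_).T
# codebook = data.mean(0) + coord.dot(eigvec)   # one row per node of a 10x15 map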
def normalize(data, method='var'):
# methods = ['var','range','log','logistic','histD','histC']
# status = ['done', 'undone']
me = np.mean(data, axis=0)
st = np.std(data, axis=0)
if method == 'var':
me = np.mean(data, axis=0)
st = np.std(data, axis=0)
n_data = (data - me) / st
return n_data
def normalize_by(data_raw, data, method='var'):
# methods = ['var','range','log','logistic','histD','histC']
# status = ['done', 'undone']
# to have the mean and std of the original data, by which SOM is trained
me = np.mean(data_raw, axis=0)
st = np.std(data_raw, axis=0)
if method == 'var':
n_data = (data - me) / st
return n_data
def denormalize_by(data_by, n_vect, n_method='var'):
# based on the normalization
if n_method == 'var':
me = np.mean(data_by, axis=0)
st = np.std(data_by, axis=0)
vect = n_vect * st + me
return vect
else:
print('data is not normalized before')
return n_vect
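# Round-trip sketch (commented out): the 'var' method is a per-column z-score
# against data_raw, and denormalize_by undoes it with the same mean/std.
# raw = np.random.rand(100, 3) * 10.0
# z = normalize_by(raw, raw[:5], method='var')
# back = denormalize_by(raw[:, 0], z[:, 0])   # approximately raw[:5, 0]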
def l(a, b):
c = np.zeros(b.shape)
c[a - b >= 0] = 1
return c
##Function to show hits
# som_labels = sm.project_data(Tr_Data)
# S = pd.DataFrame(data=som_labels,columns= ['label'])
# a = S['label'].value_counts()
# a = a.sort_index()
# a = pd.DataFrame(data=a.values, index=a.index,columns=['label'])
# d = pd.DataFrame(data= range(msz0*msz1),columns=['node_ID'])
# c = d.join(a,how='outer')
# c.fillna(value=0,inplace=True)
# hits = c.values[:,1]
# hits = hits
# nodeID = np.arange(msz0*msz1)
# c_bmu = nodeID%msz1
# r_bmu = msz0 - nodeID/msz1
# fig, ax = plt.subplots()
# plt.axis([0, msz0, 0, msz1])
# ax.scatter(r_bmu, c_bmu, s=hits/2)
| thorwhalen/ut | ml/decomp/sompy.py | Python | mit | 45,652 | [
"Gaussian"
] | 963a2ba132738ca49a05ba5a0c0d5995ec609f0a83bbd60d60f00f4ec47496cc |
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
m = 1000
n = 2000
testMehrotra = True
testIPF = False
testADMM = False
manualInit = False
display = False
progress = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Make a dense matrix
def RectangDense(height,width):
A = El.DistMatrix()
El.Gaussian( A, height, width )
return A
A = RectangDense(m,n)
# Generate a b which implies a primal feasible x
# ==============================================
xGen = El.DistMatrix()
El.Uniform(xGen,n,1,0.5,0.4999)
b = El.DistMatrix()
El.Zeros( b, m, 1 )
El.Gemv( El.NORMAL, 1., A, xGen, 0., b )
# Generate a c which implies a dual feasible (y,z)
# ================================================
yGen = El.DistMatrix()
El.Gaussian(yGen,m,1)
c = El.DistMatrix()
El.Uniform(c,n,1,0.5,0.5)
El.Gemv( El.TRANSPOSE, -1., A, yGen, 1., c )
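# Note added for clarity (not in the original example): xGen is strictly
# positive and satisfies A*xGen = b exactly, so the primal problem is
# feasible; and c is a nonnegative vector shifted by -A^T yGen, which is
# precisely the structure that guarantees a dual-feasible (y, z) with a
# nonnegative slack z.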
if display:
El.Display( A, "A" )
El.Display( b, "b" )
El.Display( c, "c" )
# Set up the control structure (and possibly initial guesses)
# ===========================================================
ctrl = El.LPDirectCtrl_d(isSparse=False)
xOrig = El.DistMatrix()
yOrig = El.DistMatrix()
zOrig = El.DistMatrix()
if manualInit:
El.Uniform(xOrig,n,1,0.5,0.4999)
El.Uniform(yOrig,m,1,0.5,0.4999)
El.Uniform(zOrig,n,1,0.5,0.4999)
x = El.DistMatrix()
y = El.DistMatrix()
z = El.DistMatrix()
if testMehrotra:
ctrl.approach = El.LP_MEHROTRA
ctrl.mehrotraCtrl.primalInit = manualInit
ctrl.mehrotraCtrl.dualInit = manualInit
ctrl.mehrotraCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
startMehrotra = El.mpi.Time()
El.LPDirect(A,b,c,x,y,z,ctrl)
endMehrotra = El.mpi.Time()
if worldRank == 0:
print "Mehrotra time:", endMehrotra-startMehrotra
if display:
El.Display( x, "x Mehrotra" )
El.Display( y, "y Mehrotra" )
El.Display( z, "z Mehrotra" )
obj = El.Dot(c,x)
if worldRank == 0:
print "Mehrotra c^T x =", obj
if testIPF:
ctrl.approach = El.LP_IPF
ctrl.ipfCtrl.primalInit = manualInit
ctrl.ipfCtrl.dualInit = manualInit
ctrl.ipfCtrl.progress = progress
ctrl.ipfCtrl.lineSearchCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
startIPF = El.mpi.Time()
El.LPDirect(A,b,c,x,y,z,ctrl)
endIPF = El.mpi.Time()
if worldRank == 0:
print "IPF time:", endIPF-startIPF
if display:
El.Display( x, "x IPF" )
El.Display( y, "y IPF" )
El.Display( z, "z IPF" )
obj = El.Dot(c,x)
if worldRank == 0:
print "IPF c^T x =", obj
if testADMM:
ctrl.approach = El.LP_ADMM
ctrl.admmCtrl.progress = progress
startADMM = El.mpi.Time()
El.LPDirect(A,b,c,x,y,z,ctrl)  # x, y, z are overwritten in place, as in the blocks above
endADMM = El.mpi.Time()
if worldRank == 0:
print "ADMM time:", endADMM-startADMM
if display:
El.Display( x, "x ADMM" )
obj = El.Dot(c,x)
if worldRank == 0:
print "ADMM c^T x =", obj
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
| birm/Elemental | examples/interface/LPDirectDense.py | Python | bsd-3-clause | 3,253 | [
"Gaussian"
] | 0b8e6ce58f5fb01e6a034bcae926f03d335cd984b2b04ec8674e8bd388bb1279 |
# Copyright 2015 Adriana Supady
#
# This file is part of fafoom.
#
# Fafoom is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fafoom is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with fafoom. If not, see <http://www.gnu.org/licenses/>.
''' Collection of diverse help/convert functions '''
from __future__ import division
import os
import numpy as np
import math
import shutil
import ConfigParser
from rdkit import Chem
from rdkit.Chem import AllChem
from operator import itemgetter
# Flow-handling
def backup(filename, obj):
""" Write the representation of an object (or objects) to a file."""
with open(filename, 'w') as outf:
if hasattr(obj, "__len__"):
for i in range(len(obj)):
outf.write("%s\n" % repr(obj[i]))
else:
outf.write("%s\n" % repr(obj))
outf.close()
def boolean(string):
"""Recover the boolean value from a string and return it."""
if string in ["False", "false", "FALSE"]:
return False
if string in ["True", "true", "TRUE"]:
return True
raise ValueError("Cannot be converted to a boolean type")
def number(s):
"""Convert to integer of float if needed"""
try:
return int(s)
except ValueError:
return float(s)
def print_output(text):
"""Write text to the 'output.txt'. Create it if needed."""
if os.path.isfile("output.txt"):
f = open("output.txt", "a")
f.write(str(text)+'\n')
f.close()
else:
f = open("output.txt", "w")
f.write(str(text)+'\n')
f.close()
def remover_file(instance):
"""Remove a file (if it exists)."""
try:
os.remove(instance)
except OSError:
pass
def remover_dir(instance):
"""Remove a directory (if it exists)."""
try:
shutil.rmtree(instance)
except OSError:
pass
def file2string(input_file):
"""Read a file to a string and return it."""
with open(input_file, 'r') as f:
string = f.read()
f.close()
return string
def string2file(string, filename):
"""Write a string to a file"""
with open(filename, 'w') as target:
target.write(string)
target.close()
def set_default(params, dict_default):
"""Set defaults for missing keys and add the key:value pairs to the
dict."""
for key in dict_default:
if key not in params:
print_output("Setting a default value for "+str(key)+": " +
str(dict_default[key]))
params[str(key)] = dict_default[key]
return params
def file2dict(filename, sections):
"""Parse a file and create a dictionary"""
config = ConfigParser.RawConfigParser()
config.read(filename)
new_dict = {}
for section in sections:
if config.has_section(section):
for key, value in config.items(section):
new_dict[str(key)] = eval(value)
return new_dict
# Help vector/matrix functions
def ig(x):
return itemgetter(x)
def cleaner(list_to_clean):
""" Remove duplicate torsion definion from a list of atom ind. tuples."""
for_remove = []
for x in reversed(range(len(list_to_clean))):
for y in reversed(range(x)):
ix1, ix2 = ig(1)(list_to_clean[x]), ig(2)(list_to_clean[x])
iy1, iy2 = ig(1)(list_to_clean[y]), ig(2)(list_to_clean[y])
if (ix1 == iy1 and ix2 == iy2) or (ix1 == iy2 and ix2 == iy1):
for_remove.append(y)
clean_list = [v for i, v in enumerate(list_to_clean)
if i not in set(for_remove)]
return clean_list
def get_vec(vec1, vec2):
"""Calculate difference between vectors of angles [in rad!].
Args:
vec1 (list) in deg
vec2 (list) in deg
Returns:
numpy array
Raises:
ValueError: if the length of the lists differ
Warning1: the vectors contain periodic values, i.e -185 -> 175
Warning2: symmetry is not included here, but can be easily added if the
index of the 'symmetric' torsion is known
"""
if len(vec1) != len(vec2):
raise ValueError("No length match between the lists")
diff_vec = np.zeros(len(vec1))
for i in range(0, len(vec1)):
tor_diff = abs(vec1[i]-vec2[i])
diff_vec[i] = min(abs(tor_diff), abs(360-tor_diff))/180.0
return diff_vec
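# Example of the periodic handling described above (commented out): torsions
# of 350 and 10 degrees are only 20 degrees apart, not 340 degrees.
# get_vec([350.0], [10.0])   # -> array([ 0.11111111]), i.e. 20/180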
def tor_rmsd(p, vec):
"""Calculate the modified p norm.The difference from standard norm is the
fact that the addends are divided by the length of the vector."""
summe = 0
for i in range(0, len(vec)):
summe += math.pow(abs(vec[i]), p)
return math.pow(summe/len(vec), (1.0/p))
def get_cartesian_rms(sdf_string1, sdf_string2):
"""Return the optimal RMS after aligning two structures."""
ref = Chem.MolFromMolBlock(sdf_string1, removeHs=False)
probe = Chem.MolFromMolBlock(sdf_string2, removeHs=False)
rms = AllChem.GetBestRMS(ref, probe)
return rms
def lowest_cartesian(string1, string2, **linked_strings):
"""Select lowest Cartesian RMS for two structures (for nonchiral and
previously optimized structures)."""
values = []
get_cartesian_rms(string1, string2)
values.append(get_cartesian_rms(string1, string2))
if linked_strings:
for string in linked_strings:
values.append(get_cartesian_rms(string1, string))
return min(values)
def find_one_in_list(sum_array, list_to_search):
"""Generate a random number and return the corresponding index from a
list. See the description of the method find_two_in_list."""
nparray_to_search = np.array(list_to_search)
rn = sum_array*np.random.rand()
found = False
index = 0
while not found:
if rn <= nparray_to_search[:index+1].sum(axis=0):
found = True
else:
index += 1
return index
def find_two_in_list(list_sum, nparray_to_search):
"""A numpy array is mapped to a segment of a line which length is equal to
1. The lengths of the segments are proportional to the corresponding numpy
array values. Next, two random numbers between 0 and 1 are generated and
the segments containing these random numbers are returned."""
rn1 = list_sum*np.random.rand()
found1 = False
index1 = 0
while not found1:
if rn1 < nparray_to_search[:index1+1].sum(axis=0):
found1 = True
else:
index1 += 1
equal = True
while equal:
rn2 = list_sum*np.random.rand()
found2 = False
index2 = 0
while not found2:
if rn2 < nparray_to_search[:index2+1].sum(axis=0):
found2 = True
else:
index2 += 1
if index2 != index1:
equal = False
return index1, index2
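# Roulette-wheel sketch for the two selection helpers above (commented out):
# fitness = np.array([1.0, 3.0, 6.0])                 # segment lengths: 10%, 30%, 60%
# i = find_one_in_list(fitness.sum(), fitness)        # index 2 is drawn ~60% of the time
# i1, i2 = find_two_in_list(fitness.sum(), fitness)   # two distinct indices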
def find_closest(numb, list_of_values, periodic=False):
"""For a given number, return the closest value(s) from a given list"""
all_dist = []
for value in list_of_values:
if periodic:
all_dist.append(min(abs(numb-value), (360-abs(numb-value))))
else:
all_dist.append(abs(numb-value))
m = min(all_dist)
closest_ind = [i for i, j in enumerate(all_dist) if j == m]
closest = []
for ind in closest_ind:
closest.append(list_of_values[ind])
return closest
def distance(x, y):
""""Calculate distance between two points in 3D."""
return np.sqrt((x[0]-y[0])**2+(x[1]-y[1])**2+(x[2]-y[2])**2)
def check_geo_sdf(sdf_string, cutoff1, cutoff2):
"""Check geometry from a sdf_string for clashes.
Args:
sdf_string (str)
distance_cutoff_1 (float): min distance between non-bonded atoms [A]
distance_cutoff_2 (float): max distance between bonded atoms [A]
Returns:
True for clash-free geometries and False for invalid geometries
Raises:
ValueError: if distance cutoffs are non-positive
"""
if cutoff1 <= 0 or cutoff2 <= 0:
raise ValueError("Distance cutoff needs to be a positive float")
atoms, bonds = get_ind_from_sdfline(sdf_string.split('\n')[3])
coordinates = np.zeros((atoms, 3))
bonds_list = []
for i in range(4, atoms+4):
coordinates[i-4][0:3] = sdf_string.split('\n')[i].split()[0:3]
for i in range(atoms+4, atoms+bonds+4):
e1, e2 = get_ind_from_sdfline(sdf_string.split('\n')[i])
bonds_list.append([e1, e2])
dist = np.zeros((atoms, atoms))
for x in range(atoms):
for y in range(x, atoms):
dist[x][y] = distance(np.array(coordinates[x]),
np.array(coordinates[y]))
dist[y][x] = dist[x][y]
check = True
for x in range(atoms):
for y in range(x+1, atoms):
if [x+1, y+1] not in bonds_list and [y+1, x+1] not in bonds_list:
if dist[x][y] < cutoff1:
check = False
return check
else:
if dist[x][y] > cutoff2:
check = False
return check
return check
def get_ind_from_sdfline(sdf_line):
"""Extract the indicies from the sdf string (for molecules with more than
99 atoms)"""
l = len(sdf_line.split()[0])
if l < 4:
ind1 = int(sdf_line.split()[0])
ind2 = int(sdf_line.split()[1])
else:
list_ind = list(sdf_line.split()[0])
if len(list_ind) == 5:
ind1 = int(list_ind[0]+list_ind[1])
ind2 = int(list_ind[2]+list_ind[3]+list_ind[4])
if len(list_ind) == 6:
ind1 = int(list_ind[0]+list_ind[1]+list_ind[2])
ind2 = int(list_ind[3]+list_ind[4]+list_ind[5])
return ind1, ind2
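# Examples for the two layouts handled above (commented out):
# get_ind_from_sdfline('  1  2  1  0')    # -> (1, 2), space-separated indices
# get_ind_from_sdfline('101102  1  0')    # -> (101, 102), fused indices (>99 atoms)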
# Format conversions
def sdf2aims(sdf_string):
"""Convert a sdf string to a aims string."""
atoms = get_ind_from_sdfline(sdf_string.split('\n')[3])[0]
coord = []
for i in range(4, 4+atoms):
x = float(sdf_string.split('\n')[i].split()[0])
y = float(sdf_string.split('\n')[i].split()[1])
z = float(sdf_string.split('\n')[i].split()[2])
name = sdf_string.split('\n')[i].split()[3]
coord.append('%s%10.4f%10.4f%10.4f%4s' % ('atom', x, y, z, name))
coord.append('\n')
aims_string = ''.join(coord)
return aims_string
def sdf2xyz(sdf_string):
"""Convert a sdf string to a xyz string."""
atoms = get_ind_from_sdfline(sdf_string.split('\n')[3])[0]
coord = [str(atoms)+('\n')]
for i in range(4, 4+atoms):
x = float(sdf_string.split('\n')[i].split()[0])
y = float(sdf_string.split('\n')[i].split()[1])
z = float(sdf_string.split('\n')[i].split()[2])
name = sdf_string.split('\n')[i].split()[3]
coord.append('\n%2s%10.4f%10.4f%10.4f' % (name, x, y, z))
coord.append('\n')
xyz_string = ''.join(coord)
return xyz_string
def aims2sdf(aims_string, sdf_template_string):
"""Convert a aims string to a sdf string. Template for the sdf string is
required."""
atoms = len(aims_string.splitlines())
sdf_form = sdf_template_string.splitlines()
c = []
cnt = 0
for i in range(len(sdf_form)):
if i > 3 and i < 4+atoms:
line = sdf_form[i].split()
line[0] = aims_string.split()[5*cnt+1]
line[1] = aims_string.split()[5*cnt+2]
line[2] = aims_string.split()[5*cnt+3]
cnt += 1
c.append('%10.4f%10.4f%10.4f%s%-2s' % (float(line[0]),
float(line[1]),
float(line[2]), str(' '),
line[3]))
for j in xrange(4, len(line)):
if j == 4:
c.append('%3d' % int(line[j]))
elif j == len(line)-1:
c.append('%3d\n' % int(line[j]))
else:
c.append('%3d' % int(line[j]))
else:
c.append(''.join(sdf_form[i])+'\n')
sdf_string = ''.join(c)
return sdf_string
def xyz2sdf(xyz_string, sdf_template_string):
"""Convert a xyz string to a sdf string. Template for the sdf string is
required."""
arr = xyz_string.splitlines()
atoms = int(arr[0].split()[0])
xyz_string_cut = '\n'.join(arr[2:])
sdf_form = sdf_template_string.splitlines()
c = []
cnt = 0
for i in range(len(sdf_form)):
if i > 3 and i < 4+atoms:
line = sdf_form[i].split()
line[0] = xyz_string_cut.split()[4*cnt+1]
line[1] = xyz_string_cut.split()[4*cnt+2]
line[2] = xyz_string_cut.split()[4*cnt+3]
cnt += 1
c.append('%10.4f%10.4f%10.4f%s%-2s' % (float(line[0]),
float(line[1]),
float(line[2]), str(' '),
line[3]))
for j in xrange(4, len(line)):
if j == 4:
c.append('%3d' % int(line[j]))
elif j == len(line)-1:
c.append('%3d\n' % int(line[j]))
else:
c.append('%3d' % int(line[j]))
else:
c.append(''.join(sdf_form[i])+'\n')
sdf_string = ''.join(c)
return sdf_string
def mirror_sdf(sdf_string):
"""Mirror the geometry from a sdf string. Return a new sdf string."""
atoms = get_ind_from_sdfline(sdf_string.split('\n')[3])[0]
sdf_form = sdf_string.splitlines()
c = []
cnt = 0
for i in range(len(sdf_form)):
if i > 3 and i < 4+atoms:
line = sdf_form[i].split()
line[0] = -1.0*float(line[0])
line[1] = -1.0*float(line[1])
line[2] = -1.0*float(line[2])
cnt += 1
c.append('%10.4f%10.4f%10.4f%s%-2s' % (float(line[0]),
float(line[1]),
float(line[2]), str(' '),
line[3]))
for j in xrange(4, len(line)):
if j == 4:
c.append('%3d' % int(line[j]))
elif j == len(line)-1:
c.append('%3d\n' % int(line[j]))
else:
c.append('%3d' % int(line[j]))
else:
c.append(''.join(sdf_form[i])+'\n')
mirror_sdf_string = ''.join(c)
return mirror_sdf_string
| FHIBioGroup/fafoom-dev | fafoom/utilities.py | Python | gpl-3.0 | 14,972 | [
"RDKit"
] | c4774ba30ae30db6a93c2afa7863ebfffd85b2706a855965f8e85a64b3477578 |
from .mle import saccade_model_mle
from .utils import *
def saccade_model_em(pointlist):
'''
Estimates the reaction time and duration of the saccade by
fitting a saccade model to the data.
The model consists of three phases:
1) source phase, gaze is fixated onto a point
2) saccade phase, gaze moves steadily from the source point
onto the target point
3) target phase, gaze becomes fixated onto a point.
The estimation is done in an Expectation-Maximization manner:
1) Initial locations are given for the source and target points.
2) Expectation: given the source and target points, saccade start
and end times are calculated and the gazepoints are divided
into three classes: source, saccade, and target gazepoints.
In EM terminology, the classes are the latent variables.
3) Maximization: the means of the new source and target gazepoints
become the new values of the source and target points.
4) Repeat steps 2) and 3) until the source and target points stay
the same.
Input arguments
pointlist, list of [x, y] points. 'None' values are not allowed.
Output arguments
source_points
saccade_points
target_points
mean_squared_error
Here we use two different concepts, times and indices:
Time t 0 1 2 3 4 5
| | | | | |
Vector [ 2 3 1 2 1 ]
| | | | |
Index i 0 1 2 3 4
'''
# Aliases
g = pointlist
# Max
max_t = len(g)
max_i = max_t - 1
# Initialize
mu_s = g[0] # First
mu_t = g[-1] # Last
t_start = min(max_t, 60) # Average SRT is about 200 ms
t_end = min(max_t, 70) # Average SD is about 30 ms
# To detect nonconvergent situations, memorize the visited t_start and
# t_end pairs and their model error.
t_history = TimePairValueHistory()
# Limit iterations in case there is a bug
max_iters = 50
em_iters = 0
for _ in range(max_iters):
t_start_hat, t_end_hat, mse, src_sse, sacc_sse, tgt_sse = saccade_model_mle(g, mu_s, mu_t, t_start, t_end)
if t_end_hat < t_start_hat:
raise Exception('t_end_hat < t_start_hat: ' + str(t_end_hat) + ',' + str(t_start_hat))
# Determine new centroids.
# Limit times so that there is at least one gazepoint.
t_start_c = min(max(t_start_hat, 1), max_t - 1)
t_end_c = min(max(t_end_hat , 1), max_t - 1)
# Compute means based on windows of 100 ms before and after saccade
g_source = select_points_time_to_time(g, 0, t_start_c)
g_target = select_points_time_to_time(g, t_end_c, max_t)
g_source30 = select_last_points(g_source, 30)
g_target30 = select_first_points(g_target, 30)
mu_s_hat = mean_point(g_source30)
mu_t_hat = mean_point(g_target30)
mu_s = mu_s_hat
mu_t = mu_t_hat
t_start = t_start_hat
t_end = t_end_hat
# Compute until we have arrived to same state again.
if not t_history.is_visited(t_start_hat, t_end_hat):
t_history.visit(t_start, t_end, mse, {
'src_sse': src_sse,
'sacc_sse': sacc_sse,
'tgt_sse': tgt_sse,
})
# The next round either is minimal again or goes here.
em_iters += 1
#print('t_start: ' + str(t_start))
#print('t_end: ' + str(t_end))
#print('mse: ' + str(mse))
else:
# Select the parameters that gave minimum error
t_start, t_end, mse, d = t_history.get_minimum()
src_sse = d['src_sse']
sacc_sse = d['sacc_sse']
tgt_sse = d['tgt_sse']
break
if em_iters == max_iters:
did_converge = False
else:
did_converge = True
source_points = select_points_time_to_time(g, 0, t_start)
saccade_points = select_points_time_to_time(g, t_start, t_end)
target_points = select_points_time_to_time(g, t_end, None)
mean_squared_error = mse
return source_points, saccade_points, target_points, mean_squared_error
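# Minimal usage sketch (commented out). pointlist is a list of [x, y] gaze
# samples from one trial; at the roughly 300 Hz implied by the 60-sample /
# 200 ms reaction-time guess above, 60 samples correspond to 200 ms.
# source, saccade, target, mse = saccade_model_em(pointlist)
# print(len(source), len(saccade), len(target), mse)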
| infant-cognition-tampere/saccademodel-py | saccademodel/em.py | Python | mit | 4,165 | [
"VisIt"
] | 547036834d52f558369daddd45e538aaef936e6d0fcb39c0716a791bb2559bc5 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
# TPR parser and tpr support module
# Copyright (c) 2011 Zhuyi Xue
# Released under the GNU Public Licence, v2
"""
Utilities for the TPRParser
===========================
Function calling order::
(TPRParser.py call do_mtop)
do_mtop -> do_symtab
-> do_ffparams -> do_iparams
-> do_moltype -> do_atoms -> do_atom
-> do_resinfo
-> do_ilists
-> do_block
-> do_blocka
-> do_molblock
Then the pieces are composed into the format that :class:`MDAnalysis.Universe` reads in.
The module also contains :func:`do_inputrec` to read the TPR header with.
"""
from __future__ import absolute_import
from six.moves import range
import numpy as np
from . import obj
from . import setting as S
from ..base import squash_by
from ...core.topology import Topology
from ...core.topologyattrs import (
Atomids,
Atomnames,
Atomtypes,
Masses,
Charges,
Resids,
Resnames,
Segids,
Bonds,
Angles,
Dihedrals,
Impropers
)
def do_string(data):
"""Emulate gmx_fio_do_string
gmx_fio_do_string reads a string from an XDR file. In contrast to the
Python unpack_string, gmx_fio_do_string reads the size as an unsigned
integer before reading the actual string.
See <gromacs-2016-src>/src/gromacs/fileio/gmx_system_xdr.c:454
"""
data.unpack_int()
return data.unpack_string()
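# Sketch of the layout this reads (commented out; that `data` is an
# xdrlib.Unpacker created by the caller in TPRParser.py is an assumption):
# import xdrlib
# p = xdrlib.Packer()
# p.pack_int(8)                 # gmx writes the length first ...
# p.pack_string(b'VERSION')     # ... then the XDR string itself
# assert do_string(xdrlib.Unpacker(p.get_buffer())) == b'VERSION'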
def ndo_int(data, n):
"""mimic of gmx_fio_ndo_real in gromacs"""
return [data.unpack_int() for i in range(n)]
def ndo_real(data, n):
"""mimic of gmx_fio_ndo_real in gromacs"""
return [data.unpack_real() for i in range(n)]
def do_rvec(data):
return data.unpack_farray(S.DIM, data.unpack_real)
def ndo_rvec(data, n):
"""mimic of gmx_fio_ndo_rvec in gromacs"""
return [data.unpack_farray(S.DIM, data.unpack_real) for i in range(n)]
def ndo_ivec(data, n):
"""mimic of gmx_fio_ndo_rvec in gromacs"""
return [data.unpack_farray(S.DIM, data.unpack_int) for i in range(n)]
def fileVersion_err(fver):
if fver not in S.SUPPORTED_VERSIONS:
raise NotImplementedError(
"Your tpx version is {0}, which this parser does not support, yet ".format(
fver))
def define_unpack_real(prec, data):
"""Define an unpack_real method of data based on the float precision used"""
if prec == 4:
data.unpack_real = data.unpack_float
elif prec == 8:
data.unpack_real = data.unpack_double
else:
raise ValueError("unsupported precision: {0}".format(prec))
def read_tpxheader(data):
"""this function is now compatible with do_tpxheader in tpxio.cpp
"""
# Last compatibility check with gromacs-2016
ver_str = do_string(data) # version string e.g. VERSION 4.0.5
precision = data.unpack_int() # e.g. 4
define_unpack_real(precision, data)
fileVersion = data.unpack_int() # version of tpx file
fileVersion_err(fileVersion)
# This is for backward compatibility with development version 77-79 where
# the tag was, mistakenly, placed before the generation.
if 77 <= fileVersion <= 79:
data.unpack_int() # the value is 8, but its significance has not been identified
file_tag = do_string(data)
fileGeneration = data.unpack_int() if fileVersion >= 26 else 0 # generation of tpx file, e.g. 17
# Versions before 77 don't have the tag, set it to TPX_TAG_RELEASE file_tag
# file_tag is used for comparing with tpx_tag. Only tpr files with a
# tpx_tag from a lower or the same version of gromacs code can be parsed by
# the tpxio.c
file_tag = do_string(data) if fileVersion >= 81 else S.TPX_TAG_RELEASE
natoms = data.unpack_int() # total number of atoms
ngtc = data.unpack_int() if fileVersion >= 28 else 0 # number of groups for T-coupling
if fileVersion < 62:
# not sure what these two are for.
data.unpack_int() # idum
data.unpack_real() # rdum
fep_state = data.unpack_int() if fileVersion >= 79 else 0
# actually it is lambda (its exact meaning is unclear); the variable is
# called lamb because 'lambda' is a reserved keyword in Python
lamb = data.unpack_real()
bIr = data.unpack_int() # has input record or not
bTop = data.unpack_int() # has topology or not
bX = data.unpack_int() # has coordinates or not
bV = data.unpack_int() # has velocity or not
bF = data.unpack_int() # has force or not
bBox = data.unpack_int() # has box or not
th = obj.TpxHeader(ver_str, precision, fileVersion, fileGeneration,
file_tag, natoms, ngtc, fep_state, lamb,
bIr, bTop, bX, bV, bF, bBox)
return th
def extract_box_info(data, fileVersion):
box = ndo_rvec(data, S.DIM)
box_rel = ndo_rvec(data, S.DIM) if fileVersion >= 51 else 0
box_v = ndo_rvec(data, S.DIM) if fileVersion >= 28 else None
if (fileVersion < 56):
ndo_rvec(data, S.DIM) # mdum?
return obj.Box(box, box_rel, box_v)
def do_mtop(data, fver):
# mtop: the topology of the whole system
symtab = do_symtab(data)
do_symstr(data, symtab) # system_name
do_ffparams(data, fver) # params
nmoltype = data.unpack_int()
moltypes = [] # non-gromacs
for i in range(nmoltype):
moltype = do_moltype(data, symtab, fver)
moltypes.append(moltype)
nmolblock = data.unpack_int()
mtop = obj.Mtop(nmoltype, moltypes, nmolblock)
bonds = []
angles = []
dihedrals = []
impropers = []
atomids = []
segids = []
resids = []
resnames = []
atomnames = []
atomtypes = []
charges = []
masses = []
atom_start_ndx = 0
res_start_ndx = 0
for i in range(mtop.nmolblock):
# molb_type is just an index for moltypes/molecule_types
mb = do_molblock(data)
# segment is made to correspond to the molblock as in gromacs, the
# naming is kind of arbitrary
segid = "seg_{0}_{1}".format(i, mtop.moltypes[mb.molb_type].name)
for j in range(mb.molb_nmol):
mt = mtop.moltypes[mb.molb_type] # mt: molecule type
for atomkind in mt.atomkinds:
atomids.append(atomkind.id + atom_start_ndx)
segids.append(segid)
resids.append(atomkind.resid + res_start_ndx)
resnames.append(atomkind.resname)
atomnames.append(atomkind.name)
atomtypes.append(atomkind.type)
charges.append(atomkind.charge)
masses.append(atomkind.mass)
# remap_ method returns [blah, blah, ..] or []
bonds.extend(mt.remap_bonds(atom_start_ndx))
angles.extend(mt.remap_angles(atom_start_ndx))
dihedrals.extend(mt.remap_dihe(atom_start_ndx))
impropers.extend(mt.remap_impr(atom_start_ndx))
atom_start_ndx += mt.number_of_atoms()
res_start_ndx += mt.number_of_residues()
# not useful here
# data.unpack_int() # mtop_natoms
# do_atomtypes(data)
# mtop_ffparams_cmap_grid_ngrid = 0
# mtop_ffparams_cmap_grid_grid_spacing = 0.1
# mtop_ffparams_cmap_grid_cmapdata = 'NULL'
# do_groups(data, symtab)
atomids = Atomids(np.array(atomids, dtype=np.int32))
atomnames = Atomnames(np.array(atomnames, dtype=object))
atomtypes = Atomtypes(np.array(atomtypes, dtype=object))
charges = Charges(np.array(charges, dtype=np.float32))
masses = Masses(np.array(masses, dtype=np.float32))
segids = np.array(segids, dtype=object)
resids = np.array(resids, dtype=np.int32)
resnames = np.array(resnames, dtype=object)
(residx, new_resids,
(new_resnames, perres_segids)) = squash_by(resids, resnames, segids)
residueids = Resids(new_resids)
residuenames = Resnames(new_resnames)
segidx, perseg_segids = squash_by(perres_segids)[:2]
segids = Segids(perseg_segids)
top = Topology(len(atomids), len(new_resids), len(perseg_segids),
attrs=[atomids, atomnames, atomtypes,
charges, masses,
residueids, residuenames,
segids],
atom_resindex=residx,
residue_segindex=segidx)
top.add_TopologyAttr(Bonds([bond for bond in bonds if bond]))
top.add_TopologyAttr(Angles([angle for angle in angles if angle]))
top.add_TopologyAttr(Dihedrals([dihedral for dihedral in dihedrals
if dihedral]))
top.add_TopologyAttr(Impropers([improper for improper in impropers
if improper]))
return top
def do_symstr(data, symtab):
#do_symstr: get a string based on index from the symtab
ndx = data.unpack_int()
return symtab[ndx]
def do_symtab(data):
symtab_nr = data.unpack_int() # number of symbols
symtab = []
for i in range(symtab_nr):
j = do_string(data)
symtab.append(j)
return symtab
def do_ffparams(data, fver):
atnr = data.unpack_int()
if fver < 57:
data.unpack_int() # idum
ntypes = data.unpack_int()
functype = ndo_int(data, ntypes)
reppow = data.unpack_double() if fver >= 66 else 12.0
if fver >= 57:
fudgeQQ = data.unpack_real()
# mimicing the c code,
# remapping the functype due to inconsistency in different versions
for i in range(len(functype)):
for k in S.ftupd:
# j[0]: tpx_version, j[1] funtype
if fver < k[0] and functype[i] >= k[1]:
functype[i] += 1
# parameters for different functions, None returned for now since not sure
# what is iparams
iparams = do_iparams(data, functype, fver)
params = obj.Params(atnr, ntypes, functype, reppow, fudgeQQ, iparams)
return params
def do_harm(data):
data.unpack_real() # rA
data.unpack_real() # krA
data.unpack_real() # rB
data.unpack_real() # krB
def do_iparams(data, functypes, fver):
# Not all elif cases in this function has been used and tested
for k, i in enumerate(functypes):
if i in [
S.F_ANGLES, S.F_G96ANGLES,
S.F_BONDS, S.F_G96BONDS,
S.F_HARMONIC, S.F_IDIHS
]:
do_harm(data)
elif i in [S.F_RESTRANGLES]:
data.unpack_real() # harmonic.rA
data.unpack_real() # harmonic.krA
elif i in [S.F_LINEAR_ANGLES]:
data.unpack_real() # linangle.klinA
data.unpack_real() # linangle.aA
data.unpack_real() # linangle.klinB
data.unpack_real() # linangle.aB
elif i in [S.F_FENEBONDS]:
data.unpack_real() # fene.bm
data.unpack_real() # fene.kb
elif i in [S.F_RESTRBONDS]:
data.unpack_real() # restraint.lowA
data.unpack_real() # restraint.up1A
data.unpack_real() # restraint.up2A
data.unpack_real() # restraint.kA
data.unpack_real() # restraint.lowB
data.unpack_real() # restraint.up1B
data.unpack_real() # restraint.up2B
data.unpack_real() # restraint.kB
elif i in [S.F_TABBONDS, S.F_TABBONDSNC, S.F_TABANGLES, S.F_TABDIHS]:
data.unpack_real() # tab.kA
data.unpack_int() # tab.table
data.unpack_real() # tab.kB
elif i in [S.F_CROSS_BOND_BONDS]:
data.unpack_real() # cross_bb.r1e
data.unpack_real() # cross_bb.r2e
data.unpack_real() # cross_bb.krr
elif i in [S.F_CROSS_BOND_ANGLES]:
data.unpack_real() # cross_ba.r1e
data.unpack_real() # cross_ba.r2e
data.unpack_real() # cross_ba.r3e
data.unpack_real() # cross_ba.krt
elif i in [S.F_UREY_BRADLEY]:
data.unpack_real() # u_b.theta
data.unpack_real() # u_b.ktheta
data.unpack_real() # u_b.r13
data.unpack_real() # u_b.kUB
if fver >= 79:
data.unpack_real() # u_b.thetaB
data.unpack_real() # u_b.kthetaB
data.unpack_real() # u_b.r13B
data.unpack_real() # u_b.kUBB
elif i in [S.F_QUARTIC_ANGLES]:
data.unpack_real() # qangle.theta
ndo_real(data, 5) # qangle.c
elif i in [S.F_BHAM]:
data.unpack_real() # bham.a
data.unpack_real() # bham.b
data.unpack_real() # bham.c
elif i in [S.F_MORSE]:
data.unpack_real() # morse.b0
data.unpack_real() # morse.cb
data.unpack_real() # morse.beta
if fver >= 79:
data.unpack_real() # morse.b0B
data.unpack_real() # morse.cbB
data.unpack_real() # morse.betaB
elif i in [S.F_CUBICBONDS]:
data.unpack_real() # cubic.b0
data.unpack_real() # cubic.kb
data.unpack_real() # cubic.kcub
elif i in [S.F_CONNBONDS]:
pass
elif i in [S.F_POLARIZATION]:
data.unpack_real() # polarize.alpha
elif i in [S.F_ANHARM_POL]:
data.unpack_real() # anharm_polarize.alpha
data.unpack_real() # anharm_polarize.drcut
data.unpack_real() # anharm_polarize.khyp
elif i in [S.F_WATER_POL]:
if fver < 31:
fileVersion_err(fver)
data.unpack_real() # wpol.al_x
data.unpack_real() # wpol.al_y
data.unpack_real() # wpol.al_z
data.unpack_real() # wpol.rOH
data.unpack_real() # wpol.rHH
data.unpack_real() # wpol.rOD
elif i in [S.F_THOLE_POL]:
data.unpack_real() # thole.a
data.unpack_real() # thole.alpha1
data.unpack_real() # thole.alpha2
data.unpack_real() # thole.rfac
elif i in [S.F_LJ]:
data.unpack_real() # lj_c6
data.unpack_real() # lj_c12
elif i in [S.F_LJ14]:
data.unpack_real() # lj14_c6A
data.unpack_real() # lj14_c12A
data.unpack_real() # lj14_c6B
data.unpack_real() # lj14_c12B
elif i in [S.F_LJC14_Q]:
data.unpack_real() # ljc14.fqq
data.unpack_real() # ljc14.qi
data.unpack_real() # ljc14.qj
data.unpack_real() # ljc14.c6
data.unpack_real() # ljc14.c12
elif i in [S.F_LJC_PAIRS_NB]:
data.unpack_real() # ljcnb.qi
data.unpack_real() # ljcnb.qj
data.unpack_real() # ljcnb.c6
data.unpack_real() # ljcnb.c12
elif i in [S.F_PIDIHS, S.F_ANGRES, S.F_ANGRESZ, S.F_PDIHS]:
data.unpack_real() # pdihs_phiA
data.unpack_real() # pdihs_cpA
if (i == S.F_ANGRES or i == S.F_ANGRESZ) and fver < 42:
data.unpack_real() # harmonic.rB
data.unpack_real() # harmonic.krB
else:
data.unpack_real() # pdihs_phiB
data.unpack_real() # pdihs_cpB
data.unpack_int() # pdihs_mult
elif i in [S.F_RESTRDIHS]:
data.unpack_real() # pdihs.phiA
data.unpack_real() # pdihs.cpA
elif i in [S.F_DISRES]:
data.unpack_int() # disres.label
data.unpack_int() # disres.type
data.unpack_real() # disres.low
data.unpack_real() # disres.up1
data.unpack_real() # disres.up2
data.unpack_real() # disres.kfac
elif i in [S.F_ORIRES]:
data.unpack_int() # orires.ex
data.unpack_int() # orires.label
data.unpack_int() # orires.power
data.unpack_real() # orires.c
data.unpack_real() # orires.obs
data.unpack_real() # orires.kfac
elif i in [S.F_DIHRES]:
if fver < 72:
data.unpack_int() # idum
data.unpack_int() # idum
data.unpack_real() # dihres.phiA
data.unpack_real() # dihres.dphiA
data.unpack_real() # dihres.kfacA
if fver >= 72:
data.unpack_real() # dihres.phiB
data.unpack_real() # dihres.dphiB
data.unpack_real() # dihres.kfacB
elif i in [S.F_POSRES]:
do_rvec(data) # posres.pos0A
do_rvec(data) # posres.fcA
if fver < 27:
fver_err(fver)
else:
do_rvec(data) # posres.pos0B
do_rvec(data) # posres.fcB
elif i in [S.F_FBPOSRES]:
data.unpack_int() # fbposres.geom
do_rvec(data) # fbposres.pos0
data.unpack_real() # fbposres.r
data.unpack_real() # fbposres.k
elif i in [S.F_CBTDIHS]:
ndo_real(data, S.NR_CBTDIHS) # cbtdihs.cbtcA
elif i in [S.F_RBDIHS]:
ndo_real(data, S.NR_RBDIHS) # iparams_rbdihs_rbcA
if fver >= 25:
ndo_real(data, S.NR_RBDIHS) # iparams_rbdihs_rbcB
elif i in [S.F_FOURDIHS]:
# Fourier dihedrals
ndo_real(data, S.NR_RBDIHS) # rbdihs.rbcA
ndo_real(data, S.NR_RBDIHS) # rbdihs.rbcB
elif i in [S.F_CONSTR, S.F_CONSTRNC]:
data.unpack_real() # dA
data.unpack_real() # dB
elif i in [S.F_SETTLE]:
data.unpack_real() # settle.doh
data.unpack_real() # settle.dhh
elif i in [S.F_VSITE2]:
data.unpack_real() # vsite.a
elif i in [S.F_VSITE3, S.F_VSITE3FD, S.F_VSITE3FAD]:
data.unpack_real() # vsite.a
data.unpack_real() # vsite.b
elif i in [S.F_VSITE3OUT, S.F_VSITE4FD, S.F_VSITE4FDN]:
data.unpack_real() # vsite.a
data.unpack_real() # vsite.b
data.unpack_real() # vsite.c
elif i in [S.F_VSITEN]:
data.unpack_int() # vsiten.n
data.unpack_real() # vsiten.a
elif i in [S.F_GB12, S.F_GB13, S.F_GB14]:
# /* We got rid of some parameters in version 68 */
if fver < 68:
data.unpack_real() # rdum
data.unpack_real() # rdum
data.unpack_real() # rdum
data.unpack_real() # rdum
data.unpack_real() # gb.sar
data.unpack_real() # gb.st
data.unpack_real() # gb.pi
data.unpack_real() # gb.gbr
data.unpack_real() # gb.bmlt
elif i in [S.F_CMAP]:
data.unpack_int() # cmap.cmapA
data.unpack_int() # cmap.cmapB
else:
raise NotImplementedError("unknown functype: {0}".format(i))
return
def do_moltype(data, symtab, fver):
if fver >= 57:
molname = do_symstr(data, symtab)
# key info: about atoms
atoms_obj = do_atoms(data, symtab, fver)
#### start: MDAnalysis specific
atomkinds = []
for k, a in enumerate(atoms_obj.atoms):
atomkinds.append(obj.AtomKind(
k,
atoms_obj.atomnames[k],
atoms_obj.type[k],
a.resind,
atoms_obj.resnames[a.resind],
a.m,
a.q))
#### end: MDAnalysis specific
# key info: about bonds, angles, dih, improp dih.
ilists = do_ilists(data, fver)
#### start: MDAnalysis specific
# these may not be available for certain molecules, e.g. tip4p
bonds, angles, dihs, impr = [], [], [], []
for ilist in ilists:
if ilist.nr > 0:
ik_obj = obj.InteractionKind(*ilist.ik)
ias = ilist.iatoms
# the following if..elif..else statement needs to be updated as new
            # types of interactions become of interest
if ik_obj.name in ['BONDS', 'G96BONDS', 'MORSE', 'CUBICBONDS',
'CONNBONDS', 'HARMONIC', 'FENEBONDS',
'RESTRAINTPOT', 'CONSTR', 'CONSTRNC',
'TABBONDS', 'TABBONDSNC']:
bonds += list(ik_obj.process(ias))
elif ik_obj.name in ['ANGLES', 'G96ANGLES', 'CROSS_BOND_BOND',
'CROSS_BOND_ANGLE', 'UREY_BRADLEY', 'QANGLES',
'RESTRANGLES', 'TABANGLES']:
angles += list(ik_obj.process(ias))
elif ik_obj.name in ['PDIHS', 'RBDIHS', 'RESTRDIHS', 'CBTDIHS',
'FOURDIHS', 'TABDIHS']:
dihs += list(ik_obj.process(ias))
elif ik_obj.name in ['IDIHS', 'PIDIHS']:
impr += list(ik_obj.process(ias))
else:
                # other interaction types are not of interest at the moment
pass
bonds = bonds if bonds else None
angles = angles if angles else None
dihs = dihs if dihs else None
impr = impr if impr else None
moltype = obj.MoleculeKind(molname, atomkinds, bonds, angles, dihs, impr)
#### end: MDAnalysis specific
    # info in do_block and do_blocka is not of interest, but has to be parsed
# here so that all moltypes can be parsed properly
do_block(data)
do_blocka(data)
return moltype
def do_atoms(data, symtab, fver):
nr = data.unpack_int() # number of atoms in a particular molecule
nres = data.unpack_int() # number of residues in a particular molecule
if fver < 57:
fver_err(fver)
atoms = []
for i in range(nr):
A = do_atom(data, fver)
atoms.append(A)
# do_strstr
atomnames = [symtab[i] for i in ndo_int(data, nr)]
if fver <= 20:
fver_err(fver)
else:
type = [symtab[i] for i in ndo_int(data, nr)] # e.g. opls_111
typeB = [symtab[i] for i in ndo_int(data, nr)]
resnames = do_resinfo(data, symtab, fver, nres)
if fver < 57:
fver_err(fver)
return obj.Atoms(atoms, nr, nres, type, typeB, atomnames, resnames)
def do_resinfo(data, symtab, fver, nres):
if fver < 63:
resnames = [symtab[i] for i in ndo_int(data, nres)]
else:
resnames = []
for i in range(nres):
resnames.append(symtab[data.unpack_int()])
# assume the uchar in gmx is 8 byte, seems right
data.unpack_fstring(8)
return resnames
def do_atom(data, fver):
m = data.unpack_real() # mass
q = data.unpack_real() # charge
mB = data.unpack_real()
qB = data.unpack_real()
tp = data.unpack_uint() # type is a keyword in python
typeB = data.unpack_uint()
ptype = data.unpack_int() # regular atom, virtual site or others
resind = data.unpack_int() # index of residue
if fver >= 52:
atomnumber = data.unpack_int() # index of atom type
if fver < 23 or fver < 39 or fver < 57:
fver_err(fver)
return obj.Atom(m, q, mB, qB, tp, typeB, ptype, resind, atomnumber)
def do_ilists(data, fver):
nr = [] # number of ilist
iatoms = [] # atoms involved in a particular interaction type
for j in range(S.F_NRE): # total number of energies (i.e. interaction types)
bClear = False
for k in S.ftupd:
if fver < k[0] and j == k[1]:
bClear = True
if bClear:
nr.append(0)
iatoms.append(None)
else:
if fver < 44:
fver_err(fver)
# do_ilist
n = data.unpack_int()
nr.append(n)
l_ = []
for i in range(n):
l_.append(data.unpack_int())
iatoms.append(l_)
return [obj.Ilist(n, it, i) for n, it, i in zip(nr, S.interaction_types, iatoms)]
def do_molblock(data):
molb_type = data.unpack_int()
    molb_nmol = data.unpack_int() # number of molecules in the molblock
molb_natoms_mol = data.unpack_int() # number of atoms in a molecule
molb_nposres_xA = data.unpack_int()
if molb_nposres_xA > 0:
ndo_rvec(data, molb_nposres_xA)
molb_nposres_xB = data.unpack_int() # The number of posres coords for top B
if molb_nposres_xB > 0:
ndo_rvec(data, molb_nposres_xB)
return obj.Molblock(molb_type, molb_nmol, molb_natoms_mol,
molb_nposres_xA, molb_nposres_xB)
def do_block(data):
block_nr = data.unpack_int() # for cgs: charge groups
# starting or ending atom indices, based on which cgs are grouped
ndo_int(data, block_nr + 1)
return do_block
def do_blocka(data):
block_nr = data.unpack_int() # No. of atoms with excls
    block_nra = data.unpack_int() # total times of appearance of atoms for excls
ndo_int(data, block_nr + 1)
ndo_int(data, block_nra)
return block_nr, block_nra
##############UTILS FOR INFORMATION NOT OF INTEREST AT THE MOMENT###############
def do_grps(data): # pragma: no cover
grps_nr = []
    myngrps = ngrps = S.egcNR # reminder of version inconsistency
for j in range(ngrps):
if j < myngrps:
v = data.unpack_int()
grps_nr.append(v)
ndo_int(data, v)
return grps_nr
def do_groups(data, symtab): # pragma: no cover
do_grps(data)
ngrpname = data.unpack_int()
# do_strstr, list of indices of group name: e.g. System, Protein,
# Protein-H, etc. use symtab[i] to check
ndo_int(data, ngrpname)
ngrpnr = []
grpnr = []
for i in range(S.egcNR):
x = data.unpack_int()
ngrpnr.append(x)
if x == 0:
grpnr.append(None)
else:
l_ = []
for i in range(x):
l_.append(data.unpack_uint())
grpnr.append(l_)
# print ngrpnr
# print [len(i) for i in grpnr if i]
return
def do_atomtypes(data): # pragma: no cover
at_nr = data.unpack_int() # at: atomtype
at_radius = ndo_real(data, at_nr)
at_vol = ndo_real(data, at_nr)
at_surftens = ndo_real(data, at_nr)
at_atomnumber = ndo_int(data, at_nr)
return at_radius, at_vol, at_surftens, at_atomnumber
def do_inputrec(data): # pragma: no cover
"""Read through header information from TPR file *data* structure.
Note that this function does not return any useful data itself. If
    you are interested in using the header information, use this
    function as a starting point for your own code.
"""
data.unpack_int() # ir_eI
data.unpack_int() # ir_nsteps = idum
data.unpack_int() # ir_init_step = idum =
data.unpack_int() # simulation_part
# not relevant here
# ir_nstcalcenergy = 1
data.unpack_int() # ir_ns_type
data.unpack_int() # ir_nslist
data.unpack_int() # ir_ndelta
data.unpack_real() # ir_rtpi
data.unpack_int() # ir_nstcomm
abs(data.unpack_int()) # ir_comm_mode
data.unpack_int() # ir_nstcheckpoint
data.unpack_int() # ir_nstcgsteep
data.unpack_int() # ir_nbfgscorr
data.unpack_int() # ir_nstlog
data.unpack_int() # ir_nstxout
data.unpack_int() # ir_nstvout
data.unpack_int() # ir_nstfout
data.unpack_int() # ir_nstenergy
data.unpack_int() # ir_nstxtcout
data.unpack_real() # ir_init_t = rdum =
data.unpack_real() # ir_delta_t = rdum =
data.unpack_real() # ir_xtcprec
ir_rlist = data.unpack_real()
data.unpack_int() # ir_coulombtype
data.unpack_real() # ir_rcoulomb_switch
ir_rcoulomb = data.unpack_real()
data.unpack_int() # ir_rvdwtype
data.unpack_real() # ir_rvdw_switch
ir_rvdw = data.unpack_real()
max(ir_rlist, max(ir_rvdw, ir_rcoulomb)) # ir_rlistlong
data.unpack_int() # ir_eDispCorr
data.unpack_real() # ir_epsilon_r
data.unpack_real() # ir_epsilon_rf
data.unpack_real() # ir_tabext
data.unpack_int() # ir_gb_algorithm
data.unpack_int() # ir_nstgbradii
data.unpack_real() # ir_rgbradii
data.unpack_real() # ir_gb_saltconc
data.unpack_int() # ir_implicit_solvent
data.unpack_real() # ir_gb_epsilon_solvent
data.unpack_real() # ir_gb_obc_alpha
data.unpack_real() # ir_gb_obc_beta
data.unpack_real() # ir_gb_obc_gamma
# not relevant here
# ir_gb_dielectric_offset = 0.009
# ir_sa_algorithm = 0 # esaAPPROX
data.unpack_real() # ir_sa_surface_tension
data.unpack_int() # ir_nkx
data.unpack_int() # ir_nky
data.unpack_int() # ir_nkz
data.unpack_int() # ir_pme_order
data.unpack_real() # ir_ewald_rtol
data.unpack_int() # ir_ewald_geometry
data.unpack_real() # ir_epsilon_surface
data.unpack_bool() # ir_bOptFFT
data.unpack_bool() # ir_bContinuation
data.unpack_int() # ir_etc
# not relevant here
# ir_nsttcouple = ir_nstcalcenergy
data.unpack_int() # ir_epcpressure coupling
data.unpack_int() # ir_epctepctype, e.g. isotropic
# not relevant here
# ir_nstpcouple = ir_nstcalcenergy
data.unpack_real() # tau_p
data.unpack_farray(DIM, data.unpack_real) # ir_ref_p_XX
data.unpack_farray(DIM, data.unpack_real) # ir_ref_p_YY
data.unpack_farray(DIM, data.unpack_real) # ir_ref_p_ZZ
data.unpack_farray(DIM, data.unpack_real) # ir_compress_XX
data.unpack_farray(DIM, data.unpack_real) # ir_compress_YY
data.unpack_farray(DIM, data.unpack_real) # ir_compress_ZZ
data.unpack_int() # ir_refcoord_scaling
data.unpack_farray(DIM, data.unpack_real) # ir_posres_com
data.unpack_farray(DIM, data.unpack_real) # ir_posres_comB
data.unpack_int() # ir_andersen_seed
data.unpack_real() # ir_shake_tol
data.unpack_int() # ir_efep
data.unpack_real() # ir_init_lambda = rdum =
data.unpack_real() # ir_delta_lambda = rdum =
# Not relevant here
# ir_n_flambda = 0
# ir_flambda = None
data.unpack_real() # ir_sc_alpha
data.unpack_int() # ir_sc_power
data.unpack_real() # ir_sc_sigma
# not relevant here
# ir_sc_sigma_min = 0
# ir_nstdhdl = 1
# ir_separate_dhdl_file = 0 # epdhdlfileYES;
# ir_dhdl_derivatives = 0 # dhdlderivativesYES
# ir_dh_hist_size = 0
# ir_dh_hist_spacing = 0.1
data.unpack_int() # ir_eDisre
data.unpack_int() # ir_eDisre_weighting
data.unpack_bool() # ir_bDisreMixed
data.unpack_real() # ir_dr_fc
data.unpack_real() # ir_dr_tau
data.unpack_int() # ir_nstdisreout
data.unpack_real() # ir_orires_fc
data.unpack_real() # ir_orires_tau
data.unpack_int() # ir_nstorireout
data.unpack_real() # ir_dihre_fc
data.unpack_real() # ir_em_stepsize
data.unpack_real() # ir_em_tol
data.unpack_bool() # ir_bShakeSOR
data.unpack_int() # ir_niter
data.unpack_real() # ir_fc_stepsize
data.unpack_int() # ir_eConstrAlg
data.unpack_int() # ir_nProjOrder
data.unpack_real() # ir_LincsWarnAngle
data.unpack_int() # ir_nLincsIter
data.unpack_real() # ir_bd_fric
data.unpack_int() # ir_ld_seed
ndo_rvec(data, DIM) # ir_deform
data.unpack_real() # ir_cos_accel
data.unpack_int() # ir_userint1
data.unpack_int() # ir_userint2
data.unpack_int() # ir_userint3
data.unpack_int() # ir_userint4
data.unpack_real() # ir_userreal1
data.unpack_real() # ir_userreal2
data.unpack_real() # ir_userreal3
data.unpack_real() # ir_userreal4
# pull_stuff
data.unpack_int() # ir_ePull
# grpopts stuff
ir_opts_ngtc = data.unpack_int()
# not relevant here
# ir_opts_nhchainlength = 1
ir_opts_ngacc = data.unpack_int()
ir_opts_ngfrz = data.unpack_int()
ir_opts_ngener = data.unpack_int()
ndo_real(data, ir_opts_ngtc) # ir_nrdf
ndo_real(data, ir_opts_ngtc) # ir_ref_t
ndo_real(data, ir_opts_ngtc) # ir_tau_t
if ir_opts_ngfrz > 0:
ndo_ivec(data, ir_opts_ngfrz) # ir_opts_nFreeze
if ir_opts_ngacc > 0:
ndo_rvec(data, ir_opts_ngacc) # ir_opts_acc
ndo_int(data, ir_opts_ngener ** 2) # ir_opts_egp_flags
ndo_int(data, ir_opts_ngtc) # ir_opts_annealing
ir_opts_anneal_npoints = ndo_int(data, ir_opts_ngtc)
ir_opts_anneal_time = []
ir_opts_anneal_temp = []
for j in range(ir_opts_ngtc):
k = ir_opts_anneal_npoints[j]
ir_opts_anneal_time.append(ndo_int(data, k))
ir_opts_anneal_temp.append(ndo_int(data, k))
# Walls
data.unpack_int() # ir_nwall
data.unpack_int() # ir_nwall_type
data.unpack_real() # ir_wall_r_linpot
# ir->wall_atomtype[0], ir->wall_atomtype[1]
ir_wall_atomtype = []
ir_wall_atomtype.append(data.unpack_int())
ir_wall_atomtype.append(data.unpack_int())
# ir->wall_density[0], ir->wall_density[1]
ir_wall_density = []
ir_wall_density.append(data.unpack_real())
ir_wall_density.append(data.unpack_real())
data.unpack_real() # ir_wall_ewald_zfac
# cosine stuff for electric fields
ir_ex_n, ir_et_n, ir_ex_a, ir_ex_phi, ir_et_a, ir_et_phi = [], [], [], [], [], []
for j in range(DIM):
x = data.unpack_int()
ir_ex_n.append(x)
y = data.unpack_int()
ir_et_n.append(y)
ir_ex_a.append(ndo_real(data, x))
ir_ex_phi.append(ndo_real(data, x))
ir_et_a.append(ndo_real(data, y))
ir_et_phi.append(ndo_real(data, y))
# QMM stuff
data.unpack_bool() # ir_bQMMM
data.unpack_int() # ir_bQMMMscheme
data.unpack_real() # ir_scalefactor
data.unpack_int() # ir_opts_ngQM
# if ir_opts_ngQM > 0:
# do_something
# indicating the parsing finishes properly
data.done()
| kain88-de/mdanalysis | package/MDAnalysis/topology/tpr/utils.py | Python | gpl-2.0 | 34,782 | [
"Gromacs",
"MDAnalysis"
] | 255350b1035b6f6474f4faf014b7653d088bd979f85ca44dc322a7af9f03a784 |
'''OpenAnything: a kind and thoughtful library for HTTP web services
This program is part of 'Dive Into Python', a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
'''
__author__ = 'Mark Pilgrim ([email protected])'
__version__ = '$Revision: 1.6 $'[11:-2]
__date__ = '$Date: 2004/04/16 21:16:24 $'
__copyright__ = 'Copyright (c) 2004 Mark Pilgrim'
__license__ = 'Python'
import sys
from urllib import quote_plus
import urllib2
import urlparse
import gzip
import mimetypes
from StringIO import StringIO
from mapConf import MapConf
conf = MapConf(None)
USER_AGENT = '%s/%s +%s' % (conf.name, conf.version, conf.web_address)
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_301(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_301(
self, req, fp, code, msg, headers)
result.status = code
return result
def http_error_302(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_302(
self, req, fp, code, msg, headers)
result.status = code
return result
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
result = urllib2.HTTPError(
req.get_full_url(), code, msg, headers, fp)
result.status = code
return result
def encode_post_data_dict(post_data):
data = []
for key in post_data.keys():
        data.append(quote_plus(str(key)) + '=' + quote_plus(str(post_data[key])))
return '&'.join(data)
def encode_post_data(post_data):
data = []
for x in post_data:
        data.append(quote_plus(str(x[0])) + '=' + quote_plus(str(x[1])))
return '&'.join(data)
def openAnything(source, etag=None, lastmodified=None, agent=USER_AGENT, post_data=None, files=None):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the lastmodified argument is supplied, it must be a formatted
date/time string in GMT (as returned in the Last-Modified header of
a previous request). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
"""
if hasattr(source, 'read'):
return source
if source == '-':
return sys.stdin
if isinstance(post_data, dict):
post_data_dict = post_data
post_data = []
for key in post_data_dict.keys():
post_data.append((key, post_data_dict[key]))
protocol = urlparse.urlparse(source)[0]
if protocol == 'http' or protocol == 'https':
# open URL with urllib2
request = urllib2.Request(source)
request.add_header('User-Agent', agent)
if lastmodified:
request.add_header('If-Modified-Since', lastmodified)
if etag:
request.add_header('If-None-Match', etag)
if post_data and files:
content_type, body = encode_multipart_formdata(post_data, files)
request.add_header('Content-Type', content_type)
request.add_data(body)
elif post_data:
request.add_data(encode_post_data(post_data))
request.add_header('Accept-encoding', 'gzip')
opener = urllib2.build_opener(SmartRedirectHandler(), DefaultErrorHandler())
return opener.open(request)
# try to open with native open function (if source is a filename)
try:
return open(source)
except (IOError, OSError):
pass
# treat source as string
return StringIO(str(source))
def fetch(source, etag=None, lastmodified=None, agent=USER_AGENT, post_data=None, files=None):
'''Fetch data and metadata from a URL, file, stream, or string'''
result = {}
f = openAnything(source, etag, lastmodified, agent, post_data, files)
result['data'] = f.read()
if hasattr(f, 'headers'):
# save ETag, if the server sent one
result['etag'] = f.headers.get('ETag')
# save Last-Modified header, if the server sent one
result['lastmodified'] = f.headers.get('Last-Modified')
if f.headers.get('content-encoding') == 'gzip':
# data came back gzip-compressed, decompress it
result['data'] = gzip.GzipFile(fileobj=StringIO(result['data'])).read()
if hasattr(f, 'url'):
result['url'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
if f.status == 302:
result['status'] = 200
else:
result['status'] = f.status
f.close()
return result
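# The helper below is an illustrative sketch added for clarity and is not part
# of the original module. It shows how the 'etag' and 'lastmodified' values
# returned by fetch() can be fed back in to make a conditional request, as
# described in the openAnything() docstring. The URL is a placeholder; a real
# caller would persist the validators between runs.
def _example_conditional_fetch(url='http://example.com/feed.xml'):
    first = fetch(url)
    if first['status'] != 200:
        return first
    # re-request with the cached validators; an unchanged resource should
    # come back with status 304 and an empty body
    return fetch(url, etag=first.get('etag'),
                 lastmodified=first.get('lastmodified'))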
def encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename) elements; the contents of each filename are read from disk and uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(open(filename, 'rb').read())
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
#print '--== encode_multipart_formdata:body ==--'
return content_type, body
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
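# Another illustrative sketch, not part of the original module: posting regular
# form fields together with a file upload. openAnything() switches to
# multipart/form-data encoding whenever both post_data and files are supplied;
# files is a list of (form field name, path on disk) pairs. The URL, field
# names and filename below are made-up examples.
def _example_upload(url='http://example.com/upload'):
    post_data = [('title', 'trackpoints'), ('visibility', 'public')]
    files = [('gpxfile', 'track.gpx')]
    return fetch(url, post_data=post_data, files=files)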
| mdraeger/gmapcatcher | gmapcatcher/openanything.py | Python | gpl-2.0 | 6,305 | [
"VisIt"
] | dd55efce23f2df5c1918ee51af50b2af7b2f540f9060fe06149f30187af586b8 |
# -*- coding: utf-8 -*-
#############################################################################
# #
# FLEQ (Free LibreSoft Educational Quizbowl) #
# A synchronous on-line competition software to improve and #
# motivate learning. #
# #
# Copyright (C) 2012 Arturo Moral, Gregorio Robles & Félix Redondo #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# Contact authors : Arturo Moral <[email protected]> #
# Gregorio Robles <[email protected]> #
# Félix Redondo <[email protected]> #
# #
#############################################################################
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from datetime import *
import datetime
from operator import itemgetter
from fleq.quizbowl.models import Date_time, Game, Preferred_start_time, Question_review, Round, Score, Tournament, Question, UserProfile
from fleq.quizbowl.views_notify import notify_user
from fleq.quizbowl.views_language import strLang, setBox
from fleq.quizbowl.views_tournaments_api import *
from fleq.quizbowl.views_connect import EditProfileForm, ChangePasswordForm
##################################
# MY TOURNAMENTS
##################################
# Show the active Tournaments of a user (either as tournament admin or as player)
def myTournaments(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/register')
# SIDEBAR INFO
myTournaments = myActiveTournaments(request)
myAdmnTournaments = myAdminTournaments(request)
myFinishedTournaments = myPastTournaments(request)
todayGames = myTodayGames(request)
nextGames = myNextGames(request)
pendingQR = myAdminPendingQuestionReviews(request.user)
# Player hasn't got any Tournament, redirect to Tournaments
if len(myAdmnTournaments) == 0 and len(myTournaments) == 0 and len(myFinishedTournaments) == 0 and not request.user.is_superuser:
return HttpResponseRedirect('/tournaments?status=info_join_tournaments')
elif len(myAdmnTournaments) == 0 and len(myTournaments) == 0 and len(myFinishedTournaments) == 0 and request.user.is_superuser:
return HttpResponseRedirect('/admin/new-tournament?status=info_new_tournament')
dateNow = datetime.datetime.now()
tournamentCategories = []
for t in myTournaments:
categories = t.categories.all()
c = {}
c['tid'] = t.pk
c['categories'] = categories
tournamentCategories.append(c)
adminTournamentCategories = []
for t in myAdmnTournaments:
categories = t.categories.all()
c = {}
c['tid'] = t.pk
c['categories'] = categories
adminTournamentCategories.append(c)
finishedTournamentCategories = []
for t in myFinishedTournaments:
categories = t.categories.all()
c = {}
c['tid'] = t.pk
c['categories'] = categories
finishedTournamentCategories.append(c)
    # Load language strings for the template mytournaments.html
try:
lang = strLang()
except:
lang = ''
    # Must we show a notification to the user?
try:
if request.GET['status']:
box = setBox(request.GET['status'])
except:
box = ''
# Info about user
user_me = UserProfile.objects.get(user=request.user)
if request.user.has_perm('fleq.quizbowl.add_tournament'):
admin_user = True
else:
admin_user = False
return render_to_response('mytournaments.html', {
'user_me': user_me,
'myTournaments': myTournaments,
'myAdminTournaments': myAdmnTournaments,
'myFinishedTournaments': myFinishedTournaments,
'tournamentCategories': tournamentCategories,
'adminTournamentCategories': adminTournamentCategories,
'finishedTournamentCategories': finishedTournamentCategories,
'dateNow': dateNow,
'lang': lang,
'box': box,
'todayGames': todayGames,
'nextGames': nextGames,
'admin_user': admin_user,
'pendingQR': pendingQR,
})
##################################
# TOURNAMENTS
##################################
# Show information about active Tournaments and future Tournaments
def tournaments(request):
    # Load language strings for the template tournaments.html
try:
lang = strLang()
except:
lang = ''
# SIDEBAR INFO
if request.user.is_authenticated():
myTournaments = myActiveTournaments(request)
myAdmnTournaments = myAdminTournaments(request)
todayGames = myTodayGames(request)
nextGames = myNextGames(request)
user_me = UserProfile.objects.get(user=request.user)
pendingQR = myAdminPendingQuestionReviews(request.user)
else:
myTournaments = ''
myAdmnTournaments = ''
todayGames = ''
nextGames = ''
user_me = request.user
pendingQR = 0
if request.user.has_perm('fleq.quizbowl.add_tournament'):
admin_user = True
else:
admin_user = False
    # Must we show a notification to the user?
try:
if request.GET['status']:
box = setBox(request.GET['status'])
except:
box = ''
# Select all next and active Tournaments
nextTournaments = Tournament.objects.exclude(start_date__lte = datetime.date.today())
activeTournaments = Tournament.objects.filter(Q(finish_date__gte = datetime.datetime.now())).order_by('-start_date')
finishedTournaments = Tournament.objects.filter(Q(finish_date__lte = datetime.datetime.now())).order_by('-start_date')[:10]
# Select all categories from Tournaments
tournamentCategories = []
for t in nextTournaments:
categories = t.categories.all()
c = {}
c['tid'] = t.pk
c['categories'] = categories
tournamentCategories.append(c)
for t in activeTournaments:
categories = t.categories.all()
c = {}
c['tid'] = t.pk
c['categories'] = categories
tournamentCategories.append(c)
for t in finishedTournaments:
categories = t.categories.all()
c = {}
c['tid'] = t.pk
c['categories'] = categories
tournamentCategories.append(c)
# Select all players from Tournaments
tournamentPlayers = []
return render_to_response('tournaments.html', {
'user_me': user_me,
'nextTournaments': nextTournaments,
'activeTournaments': activeTournaments,
'finishedTournaments': finishedTournaments,
'tournamentCategories': tournamentCategories,
'box': box,
'lang': lang,
'myTournaments': myTournaments,
'myAdminTournaments': myAdmnTournaments,
'todayGames': todayGames,
'nextGames': nextGames,
'admin_user': admin_user,
'pendingQR': pendingQR,
})
##################################
# TOURNAMENTSINFO
##################################
# Show information about a Tournament
def tournamentInfo(request, sid):
try:
tournament = Tournament.objects.get(sid = sid)
except:
return HttpResponseRedirect('/tournaments?status=error_tournament_no_exists')
    # Load language strings for the template tournamentinfo.html
try:
lang = strLang()
except:
lang = ''
if request.user.has_perm('fleq.quizbowl.add_tournament'):
admin_user = True
else:
admin_user = False
    # Must we show a notification to the user?
try:
if request.GET['status']:
box = setBox(request.GET['status'])
except:
box = ''
# SIDEBAR INFO
if request.user.is_authenticated():
myTournaments = myActiveTournaments(request)
myAdmnTournaments = myAdminTournaments(request)
todayGames = myTodayGames(request)
nextGames = myNextGames(request)
# Info about user
user_me = UserProfile.objects.get(user=request.user)
pendingQR = myAdminPendingQuestionReviews(request.user)
else:
myTournaments = ""
myAdmnTournaments = ""
todayGames = ""
nextGames = ""
user_me = ""
pendingQR = 0
# Info about Tournaments
dateNow = datetime.datetime.now()
name = tournament.name
startDate = tournament.start_date
finishDate = tournament.finish_date
numberRounds = tournament.rounds
rounds = Round.objects.filter(tournament = tournament).order_by('round_number')
    # Check if a user can join this tournament now
if startDate >= dateNow.date() and request.user != tournament.admin:
for player in tournament.players.all():
if player == request.user:
userJoined = True
break
else:
userJoined = False
else:
userJoined = True
    # Generate the score table for this Tournament
allscores = Score.objects.filter(tournament = tournament).order_by('-points', '-questions_won', 'questions_lost', 'player') # Extract all info about scores and players in a Tournament
scores = []
pos = 0
for userScore in allscores:
userProfile = UserProfile.objects.get(user=userScore.player)
user = {}
user['profile'] = userProfile
user['score'] = userScore.points
# Create tournament positions
if pos == 0:
user['pos'] = pos+1
else:
if scores[pos-1]['score'] == userScore.points:
user['pos'] = scores[pos-1]['pos']
else:
user['pos'] = pos+1
# Initializing vars for question stats
user['winner_questions'] = userScore.questions_won
user['loser_questions'] = userScore.questions_lost
user['winner_games'] = 0
user['loser_games'] = 0
        # For each user, calculate how many games they played
gamesUser = []
for r in rounds:
game = Game.objects.filter(Q(round = r), Q(player1 = userProfile.user) | Q(player2 = userProfile.user))
try:
#if game[0] and game[0].log:
if game[0]:
gamesUser.append(game)
# Total points won and lost
try:
if game[0].winner != userScore.player:
user['loser_games'] += 1
else:
user['winner_games'] += 1
except:
continue
except:
continue
user['reflection_days'] = user['score'] - user['winner_games']
user['total_games'] = len(gamesUser)
# Save user stats and increment counter var
scores.append(user)
pos += 1
# Select all games if you are admin and only user games if you aren't a superuser
allGames = []
myGames = []
if tournament.admin == request.user or request.user.is_superuser:
for r in rounds:
games = Game.objects.filter(round = r, round__tournament = tournament)
for game in games:
g = {}
g['rid'] = r.pk
g['game'] = game
allGames.append(g)
else:
if request.user.is_authenticated():
myGames = Game.objects.filter(Q(player1 = request.user) | Q(player2 = request.user))
else:
myGames = ""
return render_to_response('tournamentinfo.html', {
'user_me': user_me,
'tournament': tournament,
'name': name,
'startDate': startDate,
'finishDate': finishDate,
'dateNow': dateNow,
'numberRounds': numberRounds,
'scores': scores,
'rounds': rounds,
'myGames': myGames,
'allGames': allGames,
'lang': lang,
'box': box,
'myTournaments': myTournaments,
'myAdminTournaments': myAdmnTournaments,
'todayGames': todayGames,
'nextGames': nextGames,
'userJoined': userJoined, # Boolean variable. Shows if a user is the admin tournament or if he/she has joined to this Tournament
'admin_user': admin_user,
'pendingQR': pendingQR,
})
def tournamentJoin(request, sid):
if not request.user.is_authenticated():
return HttpResponseRedirect('/register?status=error_register_before_join_tournament')
try:
tournament = Tournament.objects.get(sid = sid)
except:
return HttpResponseRedirect('/tournaments?status=error_tournament_no_exists')
    # players are added automatically to a tournament that has not started the first time they visit the tournament's page
if (request.user != tournament.admin) and (not request.user in tournament.players.all()) and (datetime.date.today() < tournament.start_date):
tournament.players.add(request.user)
tournament.save()
s = Score(player = request.user, tournament = tournament)
s.save()
        return HttpResponseRedirect('/tournaments/' + tournament.sid + '?status=success_join_tournament')
else:
if datetime.date.today() >= tournament.start_date:
return HttpResponseRedirect('/tournaments/' + tournament.sid + '?status=error_join_tournament_expired')
elif request.user == tournament.admin:
return HttpResponseRedirect('/tournaments/' + tournament.sid + '?status=error_join_tournament_admin')
elif request.user in tournament.players.all():
return HttpResponseRedirect('/tournaments/' + tournament.sid + '?status=error_join_tournament_joined')
else:
return HttpResponseRedirect('/tournaments/' + tournament.sid + '?status=error_join_tournament')
| cristilav/FLEQ | quizbowl/views_tournaments.py | Python | agpl-3.0 | 13,518 | [
"VisIt"
] | 7024a6ea7e20af41a622bbd3173b2ed4588da46397db9859f7cc97f77ccda2c6 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import six
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.compute.monitors import base as monitor_base
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-PF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_7891',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
@mock.patch('stevedore.enabled.EnabledExtensionManager')
def setUp(self, _mock_ext_mgr):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
self._fake_instance_get_by_host_and_node)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _create_compute_node(self, values=None):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
if values:
compute.update(values)
return compute
def _create_compute_node_obj(self, context):
# Use the db representation of a compute node returned
# by _create_compute_node() to create an equivalent compute
# node object.
compute = self._create_compute_node()
compute_obj = objects.ComputeNode()
compute_obj = objects.ComputeNode._from_db_object(
context, compute_obj, compute)
return compute_obj
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'last_seen_up': None,
'forced_down': False
}
return service
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
instance_type = objects.Flavor(**instance_type)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_by_host_and_node(self, context, host, nodename,
expected_attrs=None):
return objects.InstanceList(
objects=[i for i in self._instances.values() if i['host'] == host])
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node_obj(self.context)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.compute_node = None
self.tracker._get_service = mock.Mock(return_value=None)
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def assertEqualPciDevicePool(self, expected, observed):
self.assertEqual(expected.product_id, observed.product_id)
self.assertEqual(expected.vendor_id, observed.vendor_id)
self.assertEqual(expected.tags, observed.tags)
self.assertEqual(expected.count, observed.count)
def assertEqualPciDevicePoolList(self, expected, observed):
ex_objs = expected.objects
ob_objs = observed.objects
self.assertEqual(len(ex_objs), len(ob_objs))
for i in range(len(ex_objs)):
self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb)
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected = pci_device_pool.from_pci_stats(driver.pci_stats)
self.assertEqual(expected,
self.tracker.compute_node.pci_device_pools)
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(self.context, inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_update_resource(self):
# NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look for
        # differences rather than to rely on the initial state being the same
# as an update
urs_mock = self.tracker.scheduler_client.update_resource_stats
self.tracker._update(self.context)
urs_mock.reset_mock()
# change a compute node value to simulate a change
self.tracker.compute_node.local_gb_used += 1
self.tracker._update(self.context)
urs_mock.assert_called_once_with(self.tracker.compute_node)
def test_no_update_resource(self):
# NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look for
        # differences rather than to rely on the initial state being the same
# as an update
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
update.reset_mock()
self.tracker._update(self.context)
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
observed_pools = self.tracker.compute_node.pci_device_pools
self.assertEqualPciDevicePoolList(expected_pools, observed_pools)
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def test_set_empty_ext_resources(self):
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
self.assertEqual({}, resources.stats)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": "12"}
self.assertEqual(sorted(expected),
sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
instance = self._fake_instance_obj(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance_obj(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node.memory_mb_used)
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node.local_gb_used)
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node.vcpus_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instance_context_claim(self, mock_get_all, mock_save, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
        # after exiting the claim context, the build is marked as finished and
        # the usage totals should be the same:
mock_get_all.return_value = [instance]
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node.current_workload)
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.current_workload)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
migration_type='resize',
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_instances_with_live_migrations(self, mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_sets_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class MoveClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(MoveClaimTestCase, self).setUp()
self.instance = self._fake_instance_obj()
self.instance_type = self._fake_flavor_create()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance_obj()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_move_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance_obj(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance_obj(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance_obj(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
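    # Added note: the two fixed UUIDs reported by the fake driver above never
    # correspond to tracked instances, so the resource tracker treats them as
    # orphans while still charging their memory usage, which is what the two
    # tests below assert.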
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance_obj()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
monitor = mock.MagicMock()
monitor.add_metrics_to_list.side_effect = Exception
self.tracker.monitors = [monitor]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', mock.ANY)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
class FakeCPUMonitor(monitor_base.MonitorBase):
NOW_TS = timeutils.utcnow()
def __init__(self, *args):
super(FakeCPUMonitor, self).__init__(*args)
self.source = 'FakeCPUMonitor'
def get_metric_names(self):
return set(["cpu.frequency"])
def get_metric(self, name):
return 100, self.NOW_TS
self.tracker.monitors = [FakeCPUMonitor(None)]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [
{
'timestamp': timeutils.strtime(
FakeCPUMonitor.NOW_TS),
'name': 'cpu.frequency',
'value': 100,
'source': 'FakeCPUMonitor'
},
]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
| LoHChina/nova | nova/tests/unit/compute/test_resource_tracker.py | Python | apache-2.0 | 58,703 | [
"exciting"
] | 6772bd03b3a08ea166f2d7dc8581114c81fb6fccc41ff8ef970f7a31bd3c13ec |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('get_jobs')
@click.option(
"--state",
help="Job states to filter on."
)
@click.option(
"--history_id",
help="Encoded history ID to filter on.",
type=str
)
@click.option(
"--invocation_id",
help="Encoded workflow invocation ID to filter on.",
type=str
)
@click.option(
"--tool_id",
help="Tool IDs to filter on.",
type=str,
multiple=True
)
@click.option(
"--workflow_id",
help="Encoded workflow ID to filter on.",
type=str
)
@click.option(
"--user_id",
help="Encoded user ID to filter on. Only admin users can access the jobs of other users.",
type=str
)
@click.option(
"--date_range_min",
help="Mininum job update date (in YYYY-MM-DD format) to filter on.",
type=str
)
@click.option(
"--date_range_max",
help="Maximum job update date (in YYYY-MM-DD format) to filter on.",
type=str
)
@click.option(
"--limit",
help="Maximum number of jobs to return.",
default="500",
show_default=True,
type=int
)
@click.option(
"--offset",
help="Return jobs starting from this specified position. For example, if ``limit`` is set to 100 and ``offset`` to 200, jobs 200-299 will be returned.",
type=int
)
@click.option(
"--user_details",
help="If ``True`` and the user is an admin, add the user email to each returned job dictionary.",
is_flag=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, state="", history_id="", invocation_id="", tool_id="", workflow_id="", user_id="", date_range_min="", date_range_max="", limit=500, offset=0, user_details=False):
"""Get all jobs, or select a subset by specifying optional arguments for filtering (e.g. a state).
Output:
Summary information for each selected job.
For example::
[{'create_time': '2014-03-01T16:16:48.640550',
'exit_code': 0,
'id': 'ebfb8f50c6abde6d',
'model_class': 'Job',
'state': 'ok',
'tool_id': 'fasta2tab',
'update_time': '2014-03-01T16:16:50.657399'},
{'create_time': '2014-03-01T16:05:34.851246',
'exit_code': 0,
'id': '1cd8e2f6b131e891',
'model_class': 'Job',
'state': 'ok',
'tool_id': 'upload1',
'update_time': '2014-03-01T16:05:39.558458'}]
.. note::
The following filtering options can only be used with Galaxy ``release_21.05`` or later:
user_id, limit, offset, workflow_id, invocation_id
"""
return ctx.gi.jobs.get_jobs(state=state, history_id=history_id, invocation_id=invocation_id, tool_id=tool_id, workflow_id=workflow_id, user_id=user_id, date_range_min=date_range_min, date_range_max=date_range_max, limit=limit, offset=offset, user_details=user_details)
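# Example invocation (illustrative; the exact command prefix depends on how the
# parsec CLI is installed and configured on your system):
#   $ parsec jobs get_jobs --state ok --limit 50 --user_details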
| galaxy-iuc/parsec | parsec/commands/jobs/get_jobs.py | Python | apache-2.0 | 2,961 | [
"Galaxy"
] | eac8205a389d353144edf2b717c38260801e11b737d3c5f0769e62dc2f813c98 |
# File: AthenaCommon/share/SystemOfUnits.py
# Author: Wim Lavrijsen (LBNL, [email protected])
# Created: 01/21/04
# Last: 01/21/04
# This script is a direct adaptation of CLHEP/Units/SystemOfUnits.h
# and the following is the originial CLHEP comment:
#
# -----
# HEP coherent system of Units
#
# This file has been provided to CLHEP by Geant4 (simulation toolkit for HEP).
#
# The basic units are :
# millimeter (millimeter)
# nanosecond (nanosecond)
# Mega electron Volt (MeV)
# positron charge (eplus)
# degree Kelvin (kelvin)
# the amount of substance (mole)
# luminous intensity (candela)
# radian (radian)
# steradian (steradian)
#
# Below is a non-exhaustive list of derived and practical units
# (i.e. mostly the SI units).
# You can add your own units.
#
# The SI numerical value of the positron charge is defined here,
# as it is needed for the conversion factor: positron charge = e_SI (coulomb)
#
# The other physical constants are defined in the header file:
# PhysicalConstants.h
#
# Authors: M.Maire, S.Giani
#
# History:
#
# 06.02.96 Created.
# 28.03.96 Added miscellaneous constants.
# 05.12.97 E.Tcherniaev: Redefined pascal (to avoid warnings on WinNT)
# 20.05.98 names: meter, second, gram, radian, degree
# (from [email protected] (STAR)). Added luminous units.
# 05.08.98 angstrom, picobarn, microsecond, picosecond, petaelectronvolt
# 01.03.01 parsec
# -----
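#
# Added usage sketch (not part of the original CLHEP header): quantities are
# plain floats expressed in the basic units listed above, so multiplying by a
# unit converts a value into the internal system and dividing by a unit reads
# it back out, e.g.
#   energy = 13.6*eV        # stored as 1.36e-05 (MeV)
#   energy/keV              # -> 0.0136
#   bunch_length = 7.5*cm   # stored as 75.0 (mm)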
#
# Length [L]
#
millimeter = 0.1
millimeter2 = millimeter*millimeter
millimeter3 = millimeter*millimeter*millimeter
centimeter = 10.*millimeter
centimeter2 = centimeter*centimeter
centimeter3 = centimeter*centimeter*centimeter
meter = 1000.*millimeter
meter2 = meter*meter
meter3 = meter*meter*meter
kilometer = 1000.*meter
kilometer2 = kilometer*kilometer
kilometer3 = kilometer*kilometer*kilometer
parsec = 3.0856775807e+16*meter
micrometer = 1.e-6 *meter
nanometer = 1.e-9 *meter
angstrom = 1.e-10*meter
fermi = 1.e-15*meter
barn = 1.e-28*meter2
millibarn = 1.e-3 *barn
microbarn = 1.e-6 *barn
nanobarn = 1.e-9 *barn
picobarn = 1.e-12*barn
# symbols
mm = millimeter
mm2 = millimeter2
mm3 = millimeter3
cm = centimeter
cm2 = centimeter2
cm3 = centimeter3
m = meter
m2 = meter2
m3 = meter3
km = kilometer
km2 = kilometer2
km3 = kilometer3
pc = parsec
#
# Angle
#
radian = 1.
milliradian = 1.e-3*radian
degree = (3.14159265358979323846/180.0)*radian
steradian = 1.
# symbols
rad = radian
mrad = milliradian
sr = steradian
deg = degree
#
# Time [T]
#
nanosecond = 1.
second = 1.e+9 *nanosecond
millisecond = 1.e-3 *second
microsecond = 1.e-6 *second
picosecond = 1.e-12*second
femtosecond = 1.e-15*second
hertz = 1./second
kilohertz = 1.e+3*hertz
megahertz = 1.e+6*hertz
# symbols
ns = nanosecond
s = second
ms = millisecond
#
# Electric charge [Q]
#
eplus = 1. # positron charge
e_SI = 1.60217733e-19 # positron charge in coulomb
coulomb = eplus/e_SI # coulomb = 6.24150 e+18 * eplus
#
# Energy [E]
#
megaelectronvolt = 1.
electronvolt = 1.e-6*megaelectronvolt
kiloelectronvolt = 1.e-3*megaelectronvolt
gigaelectronvolt = 1.e+3*megaelectronvolt
teraelectronvolt = 1.e+6*megaelectronvolt
petaelectronvolt = 1.e+9*megaelectronvolt
joule = electronvolt/e_SI # joule = 6.24150 e+12 * MeV
# symbols
MeV = megaelectronvolt
eV = electronvolt
keV = kiloelectronvolt
GeV = gigaelectronvolt
TeV = teraelectronvolt
PeV = petaelectronvolt
#
# Mass [E][T^2][L^-2]
#
kilogram = joule*second*second/(meter*meter)
gram = 1.e-3*kilogram
milligram = 1.e-3*gram
# symbols
kg = kilogram
g = gram
mg = milligram
#
# Power [E][T^-1]
#
watt = joule/second # watt = 6.24150 e+3 * MeV/ns
#
# Force [E][L^-1]
#
newton = joule/meter # newton = 6.24150 e+9 * MeV/mm
#
# Pressure [E][L^-3]
#
hep_pascal = newton/m2 # pascal = 6.24150 e+3 * MeV/mm3
pascal = hep_pascal # a trick to avoid warnings
bar = 100000*pascal # bar = 6.24150 e+8 * MeV/mm3
atmosphere = 101325*pascal # atm = 6.32420 e+8 * MeV/mm3
#
# Electric current [Q][T^-1]
#
ampere = coulomb/second # ampere = 6.24150 e+9 * eplus/ns
milliampere = 1.e-3*ampere
microampere = 1.e-6*ampere
nanoampere = 1.e-9*ampere
#
# Electric potential [E][Q^-1]
#
megavolt = megaelectronvolt/eplus
kilovolt = 1.e-3*megavolt
volt = 1.e-6*megavolt
#
# Electric resistance [E][T][Q^-2]
#
ohm = volt/ampere # ohm = 1.60217e-16*(MeV/eplus)/(eplus/ns)
#
# Electric capacitance [Q^2][E^-1]
#
farad = coulomb/volt # farad = 6.24150e+24 * eplus/Megavolt
millifarad = 1.e-3*farad
microfarad = 1.e-6*farad
nanofarad = 1.e-9*farad
picofarad = 1.e-12*farad
#
# Magnetic Flux [T][E][Q^-1]
#
weber = volt*second # weber = 1000*megavolt*ns
#
# Magnetic Field [T][E][Q^-1][L^-2]
#
tesla = volt*second/meter2 # tesla =0.001*megavolt*ns/mm2
gauss = 1.e-4*tesla
kilogauss = 1.e-1*tesla
#
# Inductance [T^2][E][Q^-2]
#
henry = weber/ampere # henry = 1.60217e-7*MeV*(ns/eplus)**2
#
# Temperature
#
kelvin = 1.
#
# Amount of substance
#
mole = 1.
#
# Activity [T^-1]
#
becquerel = 1./second
curie = 3.7e+10 * becquerel
#
# Absorbed dose [L^2][T^-2]
#
gray = joule/kilogram
#
# Luminous intensity [I]
#
candela = 1.
#
# Luminous flux [I]
#
lumen = candela*steradian
#
# Illuminance [I][L^-2]
#
lux = lumen/meter2
#
# Miscellaneous
#
perCent = 0.01
perThousand = 0.001
perMillion = 0.000001
| vvolkl/DD4hep | DDCore/python/SystemOfUnits.py | Python | gpl-3.0 | 6,279 | [
"Brian"
] | 3fe1a8110be65899a50c0ba4be68542abf7787cea1cd212f1ea7918cf7e5e037 |
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This tests the scafacos p2nfft dipolar calculations by matching against
# reference data from direct summation. In 2d, reference data from the mdlc
# test case is used
from __future__ import print_function
import os
import numpy as np
import unittest as ut
import espressomd
import espressomd.magnetostatics as magnetostatics
import tests_common
@ut.skipIf(not espressomd.has_features(["SCAFACOS_DIPOLES"]),
"Features not available, skipping test!")
class Scafacos1d2d(ut.TestCase):
def test_scafacos(self):
rho = 0.3
        # This is only for box size calculation. The actual particle number is
        # lower, because particles are removed from the mdlc gap region
n_particle = 100
particle_radius = 0.5
dipole_lambda = 3.0
#################################################
box_l = pow(((4 * n_particle * 3.141592654) / (3 * rho)),
1.0 / 3.0) * particle_radius
skin = 0.5
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
s.seed = s.cell_system.get_state()['n_nodes'] * [1234]
# give Espresso some parameters
s.time_step = 0.01
s.cell_system.skin = skin
s.box_l = box_l, box_l, box_l
for dim in 2, 1:
print("Dimension", dim)
# Read reference data
if dim == 2:
file_prefix = "data/mdlc"
s.periodicity = 1, 1, 0
else:
s.periodicity = 1, 0, 0
file_prefix = "data/scafacos_dipoles_1d"
f = open(tests_common.abspath(
file_prefix + "_reference_data_energy.dat"))
ref_E = float(f.readline())
f.close()
# Particles
data = np.genfromtxt(tests_common.abspath(
file_prefix + "_reference_data_forces_torques.dat"))
for p in data[:, :]:
s.part.add(
id=int(p[0]), pos=p[1:4], dip=p[4:7], rotation=(1, 1, 1))
if dim == 2:
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 0,
"pnfft_N": "80,80,160",
"pnfft_window_name": "bspline",
"pnfft_m": "4",
"p2nfft_ignore_tolerance": "1",
"pnfft_diff_ik": "0",
"p2nfft_r_cut": "6",
"p2nfft_alpha": "0.8",
"p2nfft_epsB": "0.05"})
s.actors.add(scafacos)
# change box geometry in x,y direction to ensure that
# scafacos survives it
s.box_l = np.array((1, 1, 1.3)) * box_l
else:
if dim == 1:
# 1d periodic in x
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 1,
"pnfft_N": "32,128,128",
"pnfft_direct": 0,
"p2nfft_r_cut": 2.855,
"p2nfft_alpha": "1.5",
"p2nfft_intpol_order": "-1",
"p2nfft_reg_kernel_name": "ewald",
"p2nfft_p": "16",
"p2nfft_ignore_tolerance": "1",
"pnfft_window_name": "bspline",
"pnfft_m": "8",
"pnfft_diff_ik": "1",
"p2nfft_epsB": "0.125"})
s.box_l = np.array((1, 1, 1)) * box_l
s.actors.add(scafacos)
else:
raise Exception("This shouldn't happen.")
s.thermostat.turn_off()
s.integrator.run(0)
# Calculate errors
err_f = np.sum(np.sqrt(
np.sum((s.part[:].f - data[:, 7:10])**2, 1)), 0) / np.sqrt(data.shape[0])
err_t = np.sum(np.sqrt(np.sum(
(s.part[:].torque_lab - data[:, 10:13])**2, 1)), 0) / np.sqrt(data.shape[0])
err_e = s.analysis.energy()["dipolar"] - ref_E
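            # (Added note: err_f and err_t sum the per-particle deviation
            # magnitudes and divide by sqrt(N); err_e is the absolute energy
            # deviation from the reference value.)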
print("Energy difference", err_e)
print("Force difference", err_f)
print("Torque difference", err_t)
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(
abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(
abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(
abs(err_f), tol_f, "Force difference too large")
s.part.clear()
del s.actors[0]
if __name__ == "__main__":
#print("Features: ", espressomd.features())
ut.main()
| hmenke/espresso | testsuite/python/scafacos_dipoles_1d_2d.py | Python | gpl-3.0 | 5,774 | [
"ESPResSo"
] | 7d7ab21a46897177f1c5cf5f98b0800f77b7199baa426fe7feab0b6ad092bc23 |
import astropy.io.fits as fits
import numpy as np
def lumwtd(filename='colira.fits'):
s = fits.getdata(filename)
keep = (s['INTERF'] != 2)# & (s['GALNAME']=='ngc3034')
W10 = np.nansum(s['CO10'][keep])
W21 = np.nansum(s['CO21'][keep])
W32 = np.nansum(s['CO32'][keep])
W10_err = (np.nansum((s['CO10_ERR'][keep])**2))**0.5
W21_err = (np.nansum((s['CO21_ERR'][keep])**2))**0.5
W32_err = (np.nansum((s['CO32_ERR'][keep])**2))**0.5
print('R21 = {0:4f} +/- {1:4f}'.format(\
W21/W10,W21/W10*((W10_err/W10)**2+(W21_err/W21)**2)**0.5))
print('R32 = {0:4f} +/- {1:4f}'.format(\
W32/W21,W32/W21*((W32_err/W32)**2+(W21_err/W21)**2)**0.5))
print('R31 = {0:4f} +/- {1:4f}'.format(\
W32/W10,W32/W10*((W32_err/W32)**2+(W10_err/W10)**2)**0.5))
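# Added note: the quoted uncertainties above (and in lumwtd_bygal below) assume
# independent errors on the summed intensities, i.e. for a ratio R = A/B the
# propagated error is R*sqrt((err_A/A)**2 + (err_B/B)**2).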
def lumwtd_bygal(filename='colira.fits'):
s = fits.getdata(filename)
for name in np.unique(s['GALNAME']):
keep = (s['INTERF'] == 1) & (s['GALNAME']==name) & (s['SPIRE1']>10)
if np.any(keep):
W10 = np.nansum(s['CO10'][keep])
W21 = np.nansum(s['CO21'][keep])
W32 = np.nansum(s['CO32'][keep])
W10_err = (np.nansum((s['CO10_ERR'][keep])**2))**0.5
W21_err = (np.nansum((s['CO21_ERR'][keep])**2))**0.5
W32_err = (np.nansum((s['CO32_ERR'][keep])**2))**0.5
print('Galaxy: '+name)
print('R21 = {0:4f} +/- {1:4f}'.format(\
W21/W10,W21/W10*((W10_err/W10)**2+(W21_err/W21)**2)**0.5))
print('R32 = {0:4f} +/- {1:4f}'.format(\
W32/W21,W32/W21*((W32_err/W32)**2+(W21_err/W21)**2)**0.5))
print('R31 = {0:4f} +/- {1:4f}'.format(\
W32/W10,W32/W10*((W32_err/W32)**2+(W10_err/W10)**2)**0.5))
| low-sky/colira | bayes/lumwtd.py | Python | gpl-2.0 | 1,761 | [
"Galaxy"
] | 67861f3eceb2356c459f22fa3712c12ff4de63922d9933a8de84d5f243a9b9fb |
# Copyright 2013 by Kai Blin.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import unittest
from os import path
from Bio import GenBank
from Bio import SeqIO
class GenBankTests(unittest.TestCase):
def test_invalid_product_line_raises_value_error(self):
"""Test GenBank parsing invalid product line raises ValueError"""
def parse_invalid_product_line():
rec = SeqIO.read(path.join('GenBank', 'invalid_product.gb'),
'genbank')
self.assertRaises(ValueError, parse_invalid_product_line)
def test_genbank_read(self):
with open(path.join("GenBank", "NC_000932.gb")) as handle:
record = GenBank.read(handle)
self.assertEqual(['NC_000932'], record.accession)
def test_genbank_read_multirecord(self):
with open(path.join("GenBank", "cor6_6.gb")) as handle:
self.assertRaises(ValueError, GenBank.read, handle)
def test_genbank_read_invalid(self):
with open(path.join("GenBank", "NC_000932.faa")) as handle:
self.assertRaises(ValueError, GenBank.read, handle)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_GenBank_unittest.py | Python | gpl-2.0 | 1,343 | [
"Biopython"
] | 619a8876a77314328b4321787dca25eafe5caf6f60680b6c0d53b9168c927056 |
from datetime import datetime
import json
import logging
from . import configuration
from . import dispersiongrid as dg
from . import dispersion_file_utils as dfu
from . import dispersionimages
from . import smokedispersionkml
from . import fires
def main(options):
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
# Note: The log messages in this module are intended to be info level. The
# verbose setting affects log messages in other modules in this package.
logging.info("Starting Make Dispersion KML.")
config = configuration.ConfigBuilder(options).config
parameters = (config.get('DispersionGridInput', "PARAMETERS")
or config.get('DispersionGridInput', "PARAMETER"))
if not parameters:
raise ValueError ("No NetCDF parameter(s) supplied.")
if hasattr(parameters, "capitalize"):
parameters = parameters.split()
    # this will load fires and events, dump them to json, and
    # update the daily images' utc offsets field if it was
    # set to 'auto'
fires_manager = fires.FiresManager(config)
all_parameter_args = []
for parameter in parameters:
# Determine which mode to run OutputKML in
if 'dispersion' in config.get('DEFAULT', 'MODES').split():
# Create dispersion images directory within the specified
# bsf output directory
# For backwards compatibility, support old config key 'PARAMETER'
dfu.create_dispersion_images_dir(config, parameter)
# Generate smoke dispersion images
logging.info("Processing smoke dispersion NetCDF data into plot images...")
start_datetime, grid_bbox, heights = dg.create_dispersion_images(config, parameter)
# Output dispersion grid bounds
_output_grid_bbox(grid_bbox, config)
# Post process smoke dispersion images
logging.info("Formatting dispersion plot images...")
dispersionimages.format_dispersion_images(config, parameter, heights)
else:
start_datetime = config.get("DEFAULT", "DATE") if config.has_option("DEFAULT", "DATE") else datetime.now()
heights = None
grid_bbox = None
all_parameter_args.append({
"parameter": parameter,
"start_datetime": start_datetime,
"heights": heights,
"grid_bbox": grid_bbox
})
# Generate single KMZ
smokedispersionkml.KmzCreator(config, all_parameter_args, fires_manager).create_all()
# If enabled, reproject concentration images to display in a different projection
if config.getboolean('DispersionImages', 'REPROJECT_IMAGES'):
for a in all_parameter_args:
dispersionimages.reproject_images(config, a['parameter'],
a['grid_bbox'], a['heights'])
logging.info("Make Dispersion finished.")
def _output_grid_bbox(grid_bbox, config):
grid_info_file = config.get('DispersionGridOutput', "GRID_INFO_JSON")
if grid_info_file is not None:
logging.info("Outputting grid bounds to %s." % grid_info_file)
grid_info_dict = {'bbox': grid_bbox}
grid_info_json = json.dumps(grid_info_dict)
with open(grid_info_file, 'w') as fout:
fout.write(grid_info_json)
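    # Added note: the grid info file written above is a small JSON document of
    # the form {"bbox": ...}, with the bounding box taken directly from
    # dispersiongrid.create_dispersion_images (element ordering as returned
    # there).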
| pnwairfire/blueskykml | blueskykml/makedispersionkml.py | Python | gpl-3.0 | 3,293 | [
"NetCDF"
] | bfa7afa27966dfb4e6b5b22223c906b0c91702a265c92f8bd308481b88fac35b |
from buildbot.plugins import steps
from buildbot.steps.shell import ShellCommand
from buildbot.locks import SlaveLock
from buildbot.process.properties import Interpolate
class Steps:
def __init__(self,Environ):
# Max number of running builds
build_lock = SlaveLock('build',
maxCount = 2,
maxCountForSlave = {
'farmer-slave1': 2,
})
        # All repos
all_repos = {
'quantum_espresso': {
'repository': 'https://gitlab.com/QEF/q-e.git',
'branch': 'develop',
},
# 'sternheimer_gw': {
# 'repository': 'https://github.com/mmdg-oxford/SternheimerGW.git',
# 'branch': 'develop',
# },
'wannier90': {
'repository': 'https://github.com/wannier-developers/wannier90.git',
# 'repository': 'https://github.com/sponce24/wannier90.git',
'branch': 'develop',
},
}
############################################################################
# QE code
############################################################################
self.checkout_qe = [steps.Git(
name="checkout_qe",
method="copy",
repourl=all_repos["quantum_espresso"]["repository"],
branch=all_repos["quantum_espresso"]["branch"],
haltOnFailure = True,
alwaysUseLatest = True,
)]
self.configure_qe = [ShellCommand(
name="configure_qe",
command=["./configure"],
env=Environ,
workdir="build",
locks=[build_lock.access('counting')],
haltOnFailure = True,descriptionDone=["configure_qe"]
)]
self.configure_qe_mp = [ShellCommand(
name="configure_qe",
command=["./configure","--enable-openmp","--enable-parallel"],
env=Environ,
workdir="build",
locks=[build_lock.access('counting')],
haltOnFailure = True,descriptionDone=["configure_qe_mp"]
)]
self.dep_qe = [ShellCommand(
name="dep_qe",
#command=["make","depend"],
            # DBSP: Temporary until QE 6.2 is released
command=["ls"],
env=Environ,
workdir="build",
locks=[build_lock.access('counting')],
haltOnFailure = True,descriptionDone=["dep_qe"]
)]
self.env_qe1 = [ShellCommand(
name="env_qe1",
command=Interpolate('sed -i "s/TESTCODE_NPROCS=4/TESTCODE_NPROCS=2/g" ENVIRONMENT'),
env=Environ,
workdir="build/test-suite/",
locks=[build_lock.access('counting')],
haltOnFailure = True,
descriptionDone=["env_qe1"]
)]
self.env_qe2 = [ShellCommand(
name="env_qe2",
command=Interpolate('echo "export OMP_NUM_THREADS=2" >> ENVIRONMENT'),
#command=["cat","'export OMP_NUM_THREADS=2'",">>", "ENVIRONMENT"],
env=Environ,
workdir="build/test-suite/",
locks=[build_lock.access('counting')],
haltOnFailure = True,descriptionDone=["env_qe2"]
)]
self.make_pw = [ShellCommand(
name="make_pw",
command=["make","-j","4","pwall","cp","ld1","upf"],
env=Environ,
workdir="build",
haltOnFailure=True, descriptionDone=["make_pw"],
locks=[build_lock.access('counting')]
)]
self.make_ph = [ShellCommand(
name="make_ph",
command=["make","ph"],
env=Environ,
workdir="build",
haltOnFailure=True, descriptionDone=["make_ph"],
locks=[build_lock.access('counting')]
)]
self.make_epw0 = [ShellCommand(
name="make_epw0",
command=["make"],
env=Environ,
workdir="build/EPW/src/",
haltOnFailure=True, descriptionDone=["make_epw"],
locks=[build_lock.access('counting')]
)]
self.make_epw = [ShellCommand(
name="make_epw",
command=["make","epw"],
env=Environ,
workdir="build",
haltOnFailure=True, descriptionDone=["make_epw"],
locks=[build_lock.access('counting')]
)]
self.make_lr = [ShellCommand(
name="make_lr",
command=["make","-j","8","lrmods"],
env=Environ,
workdir="build",
haltOnFailure=True,
descriptionDone=["make_lr"],
locks=[build_lock.access('counting')],
)]
self.test_clean = [ShellCommand(
name="test_clean",
command=["make", "clean"],
env=Environ,
workdir="build/test-suite",
descriptionDone = ["test_clean"],
locks=[build_lock.access('counting')],
)]
self.clean = [ShellCommand(
command=["make", "veryclean"],
alwaysRun=True,
flunkOnFailure = False,
workdir="build"
)]
self.test0 = [ShellCommand(
name="test_prolog",
command=["make","prolog"],
env=Environ,
workdir="build/test-suite",
haltOnFailure=False, descriptionDone=["make prolog"],
locks=[build_lock.access('counting')]
)]
self.test_para_PW = [ShellCommand(
name="PW_para",
command=["make","run-tests-pw-parallel"],
env=Environ,
workdir="build/test-suite",
haltOnFailure=False, descriptionDone=["PW para tests"],
locks=[build_lock.access('counting')]
)]
self.test_serial_PW = [ShellCommand(
name="PW_serial",
command=["make","run-tests-pw-serial"],
env=Environ,
workdir="build/test-suite",
haltOnFailure=False, descriptionDone=["PW serial tests"],
locks=[build_lock.access('counting')]
)]
self.test_para_CP = [ShellCommand(
name="CP_para",
command=["make","run-tests-cp-parallel"],
env=Environ,
workdir="build/test-suite",
haltOnFailure=False, descriptionDone=["CP para tests"],
locks=[build_lock.access('counting')]
)]
self.test_serial_CP = [ShellCommand(
name="CP_serial",
command=["make","run-tests-cp-serial"],
env=Environ,
workdir="build/test-suite",
haltOnFailure=False, descriptionDone=["CP serial tests"],
locks=[build_lock.access('counting')]
)]
self.test_para_PH = [ShellCommand(
name="PH_para",
command=["make","run-tests-ph-parallel"],
env=Environ,
workdir="build/test-suite",
haltOnFailure=False, descriptionDone=["PH para tests"],
locks=[build_lock.access('counting')]
)]
self.test_serial_PH = [ShellCommand(
name="PH_serial",
command=["make","run-tests-ph-serial"],
env=Environ,
workdir="build/test-suite",
haltOnFailure=False, descriptionDone=["PH serial tests"],
locks=[build_lock.access('counting')]
)]
self.test_para_EPW = [ShellCommand(
name="EPW_para",
command=["make","run-tests-epw-parallel"],
env=Environ,
workdir="build/test-suite",
haltOnFailure=False, descriptionDone=["EPW para tests"],
locks=[build_lock.access('counting')]
)]
self.test_serial_EPW = [ShellCommand(
name="EPW_serial",
command=["make","run-tests-epw-serial"],
env=Environ,
workdir="build/test-suite",
haltOnFailure=False, descriptionDone=["EPW serial tests"],
locks=[build_lock.access('counting')]
)]
############################################################################
# SGW code
############################################################################
# self.configure_qe2 = [ShellCommand(
# name="configure_qe",
# command=["./configure"],
# env=Environ,
# workdir="build",
# locks=[build_lock.access('counting')],
# haltOnFailure = True,descriptionDone=["configure_qe"]
# )]
#
# self.make_pw2 = [ShellCommand(
# name="make_pw",
# command=["make","pw","lrmods"],
# env=Environ,
# workdir="build",
# haltOnFailure=True, descriptionDone=["make_pw"],
# locks=[build_lock.access('counting')]
# )]
#
# self.checkout_sgw = [steps.Git(
# name="checkout_sgw",
# repourl=all_repos["sternheimer_gw"]["repository"],
# branch=all_repos["sternheimer_gw"]["branch"],
# workdir="build/SGW",
# haltOnFailure = True,
# alwaysUseLatest = True,
# )]
#
# self.make_clean = [ShellCommand(
# name="make_clean",
# command=["make", "clean"],
# env=Environ,
# workdir="build/SGW",
# haltOnFailure = True,
# descriptionDone = ["make_clean"],
# locks=[build_lock.access('counting')],
# )]
#
# self.make_sgw = [ShellCommand(
# name="make_sgw",
# command=["make"],
# env=Environ,
# workdir="build/SGW",
# haltOnFailure = True,
# descriptionDone = ["make_sgw"],
# locks=[build_lock.access('counting')],
# )]
#
# self.test_sgw = [ShellCommand(
# name="test_sgw",
# command=["make", "run-tests"],
# env=Environ,
# workdir="build/SGW/test-suite",
# haltOnFailure = True,
# descriptionDone = ["test_sgw"],
# locks=[build_lock.access('counting')],
# )]
#
# self.test_clean_sgw = [ShellCommand(
# name="test_clean",
# command=["make", "clean"],
# env=Environ,
# workdir="build/SGW/test-suite",
# descriptionDone = ["test_clean"],
# locks=[build_lock.access('counting')],
# )]
############################################################################
# Wannier code
############################################################################
self.checkout_wannier = [steps.Git(
name="checkout_wannier",
method="copy",
workdir="build/WAN",
repourl=all_repos["wannier90"]["repository"],
branch=all_repos["wannier90"]["branch"],
haltOnFailure = True,
alwaysUseLatest = True,
)]
self.cpconfig = [ShellCommand(
name="cp_config",
command=["cp","test-suite/config/TestFarm/farmer_gcc640_serial.inc","make.inc"],
env=Environ,
workdir="build/WAN",
haltOnFailure=True, descriptionDone=["cp_config"],
locks=[build_lock.access('counting')]
)]
self.cpgcc730 = [ShellCommand(
name="cp_config",
command=["cp","test-suite/config/TestFarm/farmer_gcc730_openmpi1107.inc","make.inc"],
env=Environ,
workdir="build/WAN",
haltOnFailure=True, descriptionDone=["cp_config"],
locks=[build_lock.access('counting')]
)]
self.cpintel17 = [ShellCommand(
name="cp_config",
command=["cp","test-suite/config/TestFarm/farmer_intel17_openmpi313.inc","make.inc"],
env=Environ,
workdir="build/WAN",
haltOnFailure=True, descriptionDone=["cp_config"],
locks=[build_lock.access('counting')]
)]
self.cpintel17i = [ShellCommand(
name="cp_config",
command=["cp","test-suite/config/TestFarm/farmer_intel17_impi.inc","make.inc"],
env=Environ,
workdir="build/WAN",
haltOnFailure=True, descriptionDone=["cp_config"],
locks=[build_lock.access('counting')]
)]
self.cpintel18 = [ShellCommand(
name="cp_config",
command=["cp","test-suite/config/TestFarm/farmer_intel18_openmpi313.inc","make.inc"],
env=Environ,
workdir="build/WAN",
haltOnFailure=True, descriptionDone=["cp_config"],
locks=[build_lock.access('counting')]
)]
self.cppgi18 = [ShellCommand(
name="cp_config",
command=["cp","test-suite/config/TestFarm/farmer_pgi18_mvapich23b.inc","make.inc"],
env=Environ,
workdir="build/WAN",
haltOnFailure=True, descriptionDone=["cp_config"],
locks=[build_lock.access('counting')]
)]
self.clean_wannier = [ShellCommand(
name="clean_wannier",
command=["make","clean"],
env=Environ,
workdir="build/WAN",
haltOnFailure = True,
descriptionDone = ["clean_wannier"],
locks=[build_lock.access('counting')],
)]
self.clean_tests = [ShellCommand(
name="clean_tests",
command=["python","clean_tests"],
env=Environ,
workdir="build/WAN/test-suite",
haltOnFailure = True,
descriptionDone = ["clean_tests"],
locks=[build_lock.access('counting')],
)]
self.make_wannier = [ShellCommand(
name="make_wannier",
command=["make"],
env=Environ,
workdir="build/WAN",
haltOnFailure = True,
descriptionDone = ["make_wannier"],
locks=[build_lock.access('counting')],
)]
self.make_wannier2 = [ShellCommand(
name="make_wannier2",
command=["make","default","w90chk2chk"],
env=Environ,
workdir="build/WAN",
haltOnFailure = True,
descriptionDone = ["make_wannier2"],
locks=[build_lock.access('counting')],
)]
self.test_wannier_serial = [ShellCommand(
name="test_wannier_seq",
command=["./run_tests","--category=default"],
env=Environ,
workdir="build/WAN/test-suite",
haltOnFailure = True,
descriptionDone = ["test_wannier_seq"],
locks=[build_lock.access('counting')],
)]
self.test_wannier_para = [ShellCommand(
name="test_wannier_para",
command=["./run_tests","--category=default", "--numprocs=4"],
env=Environ,
workdir="build/WAN/test-suite",
haltOnFailure = True,
descriptionDone = ["test_wannier_para"],
locks=[build_lock.access('counting')],
)]
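        # Usage sketch (hypothetical master.cfg fragment; the builder wiring is
        # an assumption and not part of this file):
        #   from buildbot.plugins import util
        #   steps = Steps(Environ)
        #   factory = util.BuildFactory()
        #   for s in (steps.checkout_qe + steps.configure_qe + steps.dep_qe
        #             + steps.make_pw + steps.test0 + steps.test_serial_PW):
        #       factory.addStep(s)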
| QEF/q-e_schrodinger | test-suite/buildbot/Cineca_farm/slave.py | Python | gpl-2.0 | 17,087 | [
"EPW",
"Wannier90"
] | 744e443491e1782f40bf1bd86a0da6be1b319a413744b723a11b98a984ca4f0d |
# Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Example of plotting lines with pyvisi
"""
import sys
numArgs = len(sys.argv)
if numArgs == 1:
ren_mod = "vtk"
else:
ren_mod = sys.argv[1]
# set up some data to plot
from numpy import *
x = arange(10, dtype=floating)
y = x**2
# example code for how a user would write a script in pyvisi
from pyvisi import * # base level visualisation stuff
# import the objects to render the scene using the specific renderer
if ren_mod == "gnuplot":
from pyvisi.renderers.gnuplot import * # gnuplot
elif ren_mod == "vtk":
from pyvisi.renderers.vtk import * # vtk
elif ren_mod == "plplot":
from pyvisi.renderers.plplot import * # plplot
else:
raise ValueError, "Unknown renderer module"
# define the scene object
# a Scene is a container for all of the kinds of things you want to put
# into your plot: for instance, images, meshes, arrow/vector/quiver plots,
# contour plots, spheres etc.
scene = Scene()
# create a LinePlot object
plot = LinePlot(scene)
# add some helpful info to the plot
plot.title = 'Example 2D line plot'
plot.xlabel = 'x'
plot.ylabel = 'x^2'
plot.linestyle = 'lines'
# assign some data to the plot
plot.setData(x, y)
# render the scene to screen
scene.render(pause=True, interactive=True)
# save the scene out to file
scene.save(fname="simpleLinePlot.png", format=PngImage())
# vim: expandtab shiftwidth=4:
| paultcochrane/pyvisi | examples/simpleLinePlot.py | Python | gpl-2.0 | 2,124 | [
"VTK"
] | cb6a8d176985723bab26ab1164be3c0ca193d4e8a92ae0955d5649c449225731 |
from collections import OrderedDict
from chempy import Reaction
from chempy.kinetics.rates import MassAction, RadiolyticBase
from chempy.units import to_unitless, default_units as u
def jl_dict(od):
return "Dict([%s])" % ", ".join(["(:%s, %.4g)" % (k, v) for k, v in od.items()])
def _r(r, p, substmap, parmap, *, unit_conc, unit_time, variables=None):
"""
Parameters
==========
...
variables: dict
e.g. dict(doserate=99.9*u.Gy/u.s, density=998*u.kg/u.m3)
"""
(pk,) = r.param.unique_keys
if isinstance(r.param, MassAction):
ratcoeff = to_unitless(p[pk], unit_conc ** (1 - r.order()) / unit_time)
if not r.inact_reac:
r_str = "{}, {}".format(
parmap[pk],
r.string(
substances=substmap,
with_param=False,
Reaction_arrow="-->",
Reaction_coeff_space="",
),
)
else:
all_keys = r.keys()
reac_stoichs = r.all_reac_stoich(all_keys)
act_stoichs = r.active_reac_stoich(all_keys)
rate = "*".join(
[parmap[pk]]
+ [
("%s^%d" % (substmap[k], v)) if v > 1 else substmap[k]
for k, v in zip(all_keys, act_stoichs)
if v > 0
]
)
r2 = Reaction(
dict([(k, v) for k, v in zip(all_keys, reac_stoichs) if v]), r.prod
)
r_str = "{}, {}".format(
rate,
r2.string(
substances=substmap,
with_param=False,
Reaction_arrow="\u21D2",
Reaction_coeff_space="",
),
)
elif isinstance(r.param, RadiolyticBase):
ratcoeff = to_unitless(
p[pk] * variables["doserate"] * variables["density"], unit_conc / unit_time
)
assert not r.reac and not r.inact_reac and not r.inact_prod
((prod, n),) = r.prod.items()
assert n == 1
r_str = ("{}, 0 \u21D2 {}" if ratcoeff > 0 else "{}, {} \u21D2 0").format(
parmap[pk], substmap[prod]
)
else:
raise NotImplementedError("Whats that?")
return r_str, pk, abs(ratcoeff)
class DiffEqBioJl:
_template_body = """\
{name} = @{crn_macro} begin
{reactions}
end {parameters}
{post}
"""
defaults = dict(unit_conc=u.molar, unit_time=u.second)
def __init__(self, *, rxs, pars, substance_key_map, parmap, **kwargs):
self.rxs = rxs
self.pars = pars
self.substance_key_map = substance_key_map
self.parmap = parmap
self.unit_conc = kwargs.get("unit_conc", self.defaults["unit_conc"])
self.unit_time = kwargs.get("unit_time", self.defaults["unit_time"])
@classmethod
def from_rsystem(
cls,
rsys,
par_vals,
*,
variables=None,
substance_key_map=lambda i, sk: "y%d" % i,
**kwargs
):
if not isinstance(substance_key_map, dict):
substance_key_map = {
sk: substance_key_map(si, sk) for si, sk in enumerate(rsys.substances)
}
parmap = dict(
[(r.param.unique_keys[0], "p%d" % i) for i, r in enumerate(rsys.rxns)]
)
rxs, pars = [], OrderedDict()
for r in rsys.rxns:
rs, pk, pv = _r(
r,
par_vals,
substance_key_map,
parmap,
variables=variables,
unit_conc=kwargs.get("unit_conc", cls.defaults["unit_conc"]),
unit_time=kwargs.get("unit_time", cls.defaults["unit_time"]),
)
rxs.append(rs)
if pk in pars:
raise ValueError("Are you sure (sometimes intentional)?")
pars[parmap[pk]] = pv
return cls(
rxs=rxs,
pars=pars,
substance_key_map=substance_key_map,
parmap=parmap,
**kwargs
)
def render_body(self, sparse_jac=False):
name = "rn"
return self._template_body.format(
crn_macro="min_reaction_network" if sparse_jac else "reaction_network",
name=name,
reactions="\n ".join(self.rxs),
parameters=" ".join(self.pars),
post="addodes!({}, sparse_jac=True)".format(name) if sparse_jac else "",
)
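    # Illustrative render_body() output for two mass-action reactions with
    # parameters p0/p1 (substance and parameter names depend on
    # substance_key_map and parmap; sketch only):
    #   rn = @reaction_network begin
    #       p0, y0 --> 2y1
    #       p1, y1 --> y2
    #   end p0 p1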
def render_setup(self, *, ics, atol, tex=True, tspan=None):
export = ""
export += "p = %s\n" % jl_dict(self.pars)
export += "ics = %s\n" % jl_dict(
OrderedDict(
{
self.substance_key_map[k]: v
for k, v in to_unitless(ics, u.molar).items()
}
)
)
if atol:
export += "abstol_d = %s\n" % jl_dict(
{
self.substance_key_map[k]: v
for k, v in to_unitless(atol, u.molar).items()
}
)
            export += (
                "abstol = Array([get(abstol_d, k, 1e-10) for k=keys(speciesmap(rn))])\n"
            )
if tex:
export += "subst_tex = Dict([%s])\n" % ", ".join(
'(:%s, ("%s", "%s"))' % (v, k, k.latex_name)
for k, v in self.substance_key_map.items()
)
if tspan:
export += """\
tspan = (0., %12.5g)
u0 = Array([get(ics, k, 1e-28) for k=keys(speciesmap(rn))])
parr = Array([p[k] for k=keys(paramsmap(rn))])
oprob = ODEProblem(rn, u0, tspan, parr)
"""
return export
def render_solve(self):
return (
"sol = solve(oprob, reltol=1e-9, abstol=abstol, Rodas5(),"
" callback=PositiveDomain(ones(length(u0)), abstol=abstol))"
)
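# Usage sketch (added for illustration; `rsys`, `par_vals`, `ics` and the unit handling
# are placeholders; check the chempy documentation for the exact ReactionSystem and
# rate-parameter API before relying on this):
#     from chempy.units import default_units as u
#     # rsys: a chempy.ReactionSystem whose rate parameters carry unique_keys
#     # par_vals: mapping from those unique_keys to values with units,
#     #           e.g. {"kf": 1.4e11 / u.molar / u.second}
#     sys_jl = DiffEqBioJl.from_rsystem(rsys, par_vals)
#     print(sys_jl.render_body())     # @reaction_network block for DiffEqBiological.jl
#     print(sys_jl.render_setup(ics={"H2O": 55.4 * u.molar},
#                               atol={"H2O": 1e-10 * u.molar}, tspan=3600 * u.s))
#     print(sys_jl.render_solve())    # solve(...) call with Rodas5 and a PositiveDomain callback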
| bjodah/aqchem | chempy/util/_julia.py | Python | bsd-2-clause | 5,909 | [
"ChemPy"
] | 0af518908e2dfc1e012401cecc10bdf6e57c6ae12be3e834d5bd6eeeddba428d |
########################################################################
#
# (C) 2013, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import sys
import yaml
import time
import re
import shutil
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.token import GalaxyToken
from ansible.playbook.role.requirement import RoleRequirement
from ansible.module_utils._text import to_text
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
    '''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def set_action(self):
super(GalaxyCLI, self).set_action()
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option('--container-enabled', dest='container_enabled', action='store_true', default=False,
help='Initialize the skeleton role with default contents for a Container Enabled role.')
self.parser.add_option('--role-skeleton', dest='role_skeleton', default=None,
help='The path to a role skeleton that the new role should be based upon.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
"[--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option( '--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if self.action not in ("delete","import","init","login","setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str,
default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg '
'file (/etc/ansible/roles if not configured)')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
help='Ignore SSL certificate validation errors.')
self.set_action()
super(GalaxyCLI, self).parse()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
def run(self):
super(GalaxyCLI, self).run()
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.get_opt("ignore_errors", False):
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
creates the skeleton framework of a role that complies with the galaxy metadata format.
"""
init_path = self.get_opt('init_path', './')
force = self.get_opt('force', False)
role_skeleton = self.get_opt('role_skeleton', C.GALAXY_ROLE_SKELETON)
role_name = self.args.pop(0).strip() if self.args else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
inject_data = dict(
role_name=role_name,
author='your name',
description='your description',
company='your company (optional)',
license='license (GPLv2, CC-BY, etc)',
issue_tracker_url='http://example.com/issue/tracker',
min_ansible_version='1.2',
container_enabled=self.options.container_enabled
)
# create role directory
if not os.path.exists(role_path):
os.makedirs(role_path)
if role_skeleton is not None:
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
role_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
role_skeleton = os.path.expanduser(role_skeleton)
skeleton_ignore_re = list(map(lambda x: re.compile(x), skeleton_ignore_expressions))
template_env = Environment(loader=FileSystemLoader(role_skeleton))
for root, dirs, files in os.walk(role_skeleton, topdown=True):
rel_root = os.path.relpath(root, role_skeleton)
in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
dirs[:] = filter(lambda d: not any(map(lambda r: r.match(os.path.join(rel_root, d)), skeleton_ignore_re)), dirs)
for f in files:
filename, ext = os.path.splitext(f)
if any(map(lambda r: r.match(os.path.join(rel_root, f)), skeleton_ignore_re)):
continue
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(role_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))
for d in dirs:
dir_path = os.path.join(role_path, rel_root, d)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.get_opt("roles_path")
data = ''
for role in self.args:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
                    install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not self.options.offline:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec= req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
### FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
uses the args list of roles to be installed, unless -f was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
"""
role_file = self.get_opt("role_file", None)
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file
# or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
elif len(self.args) == 1 and role_file is not None:
# using a role file is mutually exclusive of specifying
# the role name on the command line
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.get_opt("no_deps", False)
force = self.get_opt('force', False)
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
if "include" not in role:
role = RoleRequirement.role_yaml_parse(role)
display.vvv("found role %s in yaml file" % str(role))
if "name" not in role and "scm" not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role["include"]) as f_include:
try:
roles_left += [
GalaxyRole(self.galaxy, **r) for r in
map(RoleRequirement.role_yaml_parse,
yaml.safe_load(f_include))
]
except Exception as e:
msg = "Unable to load data from the include requirements file: %s %s"
raise AnsibleError(msg % (role_file, e))
else:
display.deprecated("going forward only the yaml format will be supported")
# roles listed in a file, one per line
for rline in f.readlines():
if rline.startswith("#") or rline.strip() == '':
continue
display.debug('found role %s in text file' % str(rline))
role = RoleRequirement.role_yaml_parse(rline.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
f.close()
except (IOError, OSError) as e:
raise AnsibleError('Unable to open %s: %s' % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
display.vvv('Installing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % str(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
(str(dep_role), role.name, dep_role.install_info['version']))
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
            # show only the requested role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
display.display("- %s, %s" % (name, version))
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.get_opt('roles_path')
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path)
elif not os.path.isdir(role_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if len(self.args):
terms = []
for i in range(len(self.args)):
terms.append(self.args.pop())
search = '+'.join(terms[::-1])
if not search and not self.options.platforms and not self.options.galaxy_tags and not self.options.author:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=self.options.platforms,
tags=self.options.galaxy_tags, author=self.options.author, page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
verify user's identify via Github and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = self.options.token
galaxy_response = self.api.authenticate(github_token)
if self.options.token is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(self.args) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_repo = to_text(self.args.pop(), errors='surrogate_or_strict')
github_user = to_text(self.args.pop(), errors='surrogate_or_strict')
if self.options.check_status:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference, role_name=self.options.role_name)
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not self.options.wait:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo']))
if self.options.check_status or self.options.wait:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if self.options.setup_list:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']),color=C.COLOR_OK)
return 0
if self.options.remove_id:
# Remove a secret
self.api.remove_secret(self.options.remove_id)
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
if len(self.args) < 4:
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
return 0
secret = self.args.pop()
github_repo = self.args.pop()
github_user = self.args.pop()
source = self.args.pop()
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
if len(self.args) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_repo = self.args.pop()
github_user = self.args.pop()
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name))
display.display(resp['status'])
return True
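# Reference only (added; role names are illustrative, not taken from this file): the
# execute_* methods above back the ansible-galaxy sub-commands, e.g.
#     ansible-galaxy init my_role                       # execute_init
#     ansible-galaxy install geerlingguy.nginx          # execute_install (from Galaxy)
#     ansible-galaxy install -r requirements.yml        # execute_install (from a roles file)
#     ansible-galaxy list                               # execute_list
#     ansible-galaxy remove my_role                     # execute_remove
#     ansible-galaxy search nginx --author geerlingguy  # execute_search
#     ansible-galaxy login --github-token TOKEN         # execute_login
#     ansible-galaxy import github_user github_repo     # execute_import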
| ashemedai/ansible | lib/ansible/cli/galaxy.py | Python | gpl-3.0 | 31,861 | [
"Galaxy"
] | 15aae5c88c28d2e75ffeae8ace88ec4fe225498e9561486f718540f118cb11b2 |
# Copyright: (c) 2013, James Cammarata <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import sys
import time
import yaml
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import optparse_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_native, to_text
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
VALID_ACTIONS = frozenset(("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup"))
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def set_action(self):
super(GalaxyCLI, self).set_action()
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
self.parser.set_description("Removes the role from Galaxy. It does not remove or alter the actual GitHub repository.")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.set_description("Import a role.")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
self.parser.set_description("View more details about a specific role.")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.set_description("Initialize new role with the base structure of a role.")
self.parser.add_option('--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON,
help='The path to a role skeleton that the new role should be based upon.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.set_description("Install Roles from file(s), URL(s) or tar file(s)")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
self.parser.add_option('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False, help='Use tar instead of the scm archive option when packaging the role')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
self.parser.set_description("Delete a role from roles_path.")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
self.parser.set_description("Show the name and version of each role installed in the roles_path.")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.set_description("Login to api.github.com server in order to use ansible-galaxy sub command such as 'import', 'delete' and 'setup'.")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
"[--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
self.parser.set_description("Search the Galaxy database by tags, platforms, author and multiple keywords.")
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
self.parser.set_description("Manage the integration between Galaxy and the given source.")
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if self.action not in ("delete", "import", "init", "login", "setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=opt_help.unfrack_paths, default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg'
' file (/etc/ansible/roles if not configured)', type='str')
if self.action in ("init", "install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
if self.action == "install":
self.parser.add_option('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing role and it's dependencies")
def init_parser(self):
''' create an options parser for bin/ansible '''
super(GalaxyCLI, self).init_parser(
usage="usage: %%prog [%s] [--help] [options] ..." % "|".join(sorted(self.VALID_ACTIONS)),
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]),
desc="Perform various Role related operations.",
)
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
help='Ignore SSL certificate validation errors.')
self.set_action()
def post_process_args(self, options, args):
options, args = super(GalaxyCLI, self).post_process_args(options, args)
display.verbosity = options.verbosity
return options, args
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
self.api = GalaxyAPI(self.galaxy)
self.execute()
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
creates the skeleton framework of a role that complies with the galaxy metadata format.
"""
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
role_skeleton = context.CLIARGS['role_skeleton']
role_name = context.CLIARGS['args'][0].strip() if context.CLIARGS['args'] else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
inject_data = dict(
role_name=role_name,
author='your name',
description='your description',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
issue_tracker_url='http://example.com/issue/tracker',
min_ansible_version='2.4',
role_type=context.CLIARGS['role_type']
)
# create role directory
if not os.path.exists(role_path):
os.makedirs(role_path)
if role_skeleton is not None:
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
role_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
role_skeleton = os.path.expanduser(role_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
template_env = Environment(loader=FileSystemLoader(role_skeleton))
for root, dirs, files in os.walk(role_skeleton, topdown=True):
rel_root = os.path.relpath(root, role_skeleton)
in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(role_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))
for d in dirs:
dir_path = os.path.join(role_path, rel_root, d)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
if not context.CLIARGS['args']:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not context.CLIARGS['offline']:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
uses the args list of roles to be installed, unless -f was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
"""
role_file = context.CLIARGS['role_file']
if not context.CLIARGS['args'] and role_file is None:
# the user needs to specify one of either --role-file or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
if no_deps and force_deps:
raise AnsibleOptionsError("You cannot both force dependencies and no dependencies")
force = context.CLIARGS['force'] or force_deps
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file (%s): %s" % (role_file, to_native(e)))
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
if "include" not in role:
role = RoleRequirement.role_yaml_parse(role)
display.vvv("found role %s in yaml file" % str(role))
if "name" not in role and "scm" not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role["include"]) as f_include:
try:
roles_left += [
GalaxyRole(self.galaxy, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
]
except Exception as e:
msg = "Unable to load data from the include requirements file: %s %s"
raise AnsibleError(msg % (role_file, e))
else:
raise AnsibleError("Invalid role requirements file")
f.close()
except (IOError, OSError) as e:
raise AnsibleError('Unable to open %s: %s' % (role_file, to_native(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
# only process roles in roles files when names matches if given
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % to_text(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
display.display('- changing dependant role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
roles_left.append(dep_role)
else:
display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), role.name, dep_role.install_info['version']))
else:
if force_deps:
roles_left.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
if len(context.CLIARGS['args']) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
if context.CLIARGS['args']:
# show the requested role, if it exists
name = context.CLIARGS['args'][0]
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = context.CLIARGS['roles_path']
path_found = False
warnings = []
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
elif not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
path_found = True
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file, path=path)
if gr.metadata:
_display_role(gr)
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
return 0
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
verify user's identify via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if context.CLIARGS['token'] is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = context.CLIARGS['token']
galaxy_response = self.api.authenticate(github_token)
if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(context.CLIARGS['args']) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_user = to_text(context.CLIARGS['args'][0], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['args'][1], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with GitHub repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from GitHub or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
if len(context.CLIARGS['args']) < 4:
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
source = context.CLIARGS['args'][0]
github_user = context.CLIARGS['args'][1]
github_repo = context.CLIARGS['args'][2]
secret = context.CLIARGS['args'][3]
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
if len(context.CLIARGS['args']) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_user = context.CLIARGS['args'][0]
github_repo = context.CLIARGS['args'][1]
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
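# Reference only (added; sources and versions are illustrative): execute_install above
# accepts a YAML requirements file via -r, e.g.
#     # requirements.yml
#     - src: geerlingguy.java
#       version: "1.9.6"
#     - src: https://github.com/bennojoy/nginx
#       name: nginx_role
#     - include: more_requirements.yml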
| alxgu/ansible | lib/ansible/cli/galaxy.py | Python | gpl-3.0 | 34,060 | [
"Galaxy"
] | 159d18e7132b41a9dc30fd4ed588b3e1f25c54e5f52edf58312f725a5ae50d24 |
# Copyright 2014 Roberto Brian Sarrionandia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import ndb
from google.appengine.ext.ndb import polymodel
from google.appengine.api import mail
import string
import random
def pin_gen(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
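# Example (added): pin_gen() returns a random 6-character code drawn from upper-case
# letters and digits, e.g. "7GK2QD"; pin_gen(size=4) would give something like "Q9ZD".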
class TUser(ndb.Model):
"""Models a user account"""
g_user = ndb.UserProperty()
nickname = ndb.StringProperty()
full_name = ndb.StringProperty()
current_institution = ndb.StringProperty()
public_profile = ndb.BooleanProperty()
custom_email = ndb.StringProperty()
email_code = ndb.StringProperty()
email_verified = ndb.BooleanProperty()
phone = ndb.StringProperty()
basic_info = ndb.ComputedProperty(lambda self: self.full_name and self.phone)
def preferredEmail(self):
if self.email_verified and self.custom_email:
return self.custom_email
else:
return self.g_user.email()
def change_email(self, email):
self.email_verified = False
self.custom_email = email
self.email_code = pin_gen()
self.put()
#Send a verification email
message = mail.EmailMessage(sender="Tournatrack <[email protected]",
subject="Verify your email address")
message.to = self.full_name + ' <' + email + '>'
message.body = """
This email address has been added to an account on Tournatrack. We will only
use it if you take your verification code, and input it to your Account Settings page.
Your verification code is: %s
Later!
-The Tournatrack Lizards
"""%self.email_code
message.send()
def verify_email(self, code):
if self.email_code == code:
self.email_verified = True
self.email_code = None
self.put()
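# Flow sketch (added for clarity; the address and code are made up):
#     user.change_email('[email protected]')  # stored unverified, 6-character code emailed
#     user.verify_email('AB12CD')             # matching code sets email_verified
#     user.preferredEmail()                   # now returns the custom address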
class Tournament(ndb.Model):
"""Models an individual tournament"""
name = ndb.StringProperty()
owner = ndb.KeyProperty(kind='TUser', repeated=True)
trackpin = ndb.StringProperty()
start = ndb.DateProperty()
end = ndb.DateProperty()
customRoomStatus = ndb.StringProperty(repeated=True)
#Information to be displayed
blurb = ndb.TextProperty()
facebook = ndb.StringProperty()
homepage = ndb.StringProperty()
contact_email = ndb.StringProperty()
#Return the blurb with newlines
def blurbDisplay(self):
if self.blurb:
r = '<br />'
return self.blurb.replace('\r\n',r).replace('\n\r',r).replace('\r',r).replace('\n',r)
else:
return None
#Return a list of rooms attached to the tournament
def rooms(self):
return Room.query(ancestor=self.key).order(Room.name)
#Return the pre-registration record for the tournament
def preRegRecord(self):
return PreRegRecord.query(ancestor=self.key)
#Delete the tournament and its attached rooms and registration record
def destroy(self):
for r in self.rooms():
r.key.delete()
preReg = self.preRegRecord().get()
if preReg:
preReg.destroy()
self.key.delete()
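# Note (added): rooms and pre-registration records are stored as ndb descendants of
# their Tournament, which is what the ancestor queries above rely on, e.g.
#     room = Room(parent=tournament.key, name="G32", active=True, status="Free")
#     room.put()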
class Room(ndb.Model):
"""Models a room in a tournament"""
name = ndb.StringProperty()
active = ndb.BooleanProperty()
status = ndb.StringProperty()
changed = ndb.TimeProperty()
comment = ndb.StringProperty()
class PreRegRecord(ndb.Model):
"""Models the pre-registration of a tournament"""
open = ndb.BooleanProperty()
teamCap = ndb.IntegerProperty()
#Delete the object and its associated judges, teams, and institutions
def destroy(self):
for j in self.indyJudges():
j.key.delete()
for t in self.teams():
t.key.delete()
for i in self.institutions():
i.destroy()
self.key.delete()
def isRegistered(self, tuser):
if tuser:
return self.isInstitution(tuser) or self.isJudge(tuser) or self.isOpenTeam(tuser)
#Independent Judges registered
def indyJudges(self):
return RegisteredIndependentJudge.query(ancestor=self.key)
#Check if a user is registered as a judge
def isJudge(self, tuser):
if tuser:
q = RegisteredIndependentJudge.query(ancestor=self.key)
q = q.filter(RegisteredIndependentJudge.user == tuser.key)
return q.get()
else:
return None
#Check if a user is registered a an Open Team
def isOpenTeam(self, tuser):
if tuser:
q = RegisteredOpenTeam.query(ancestor=self.key)
q = q.filter(RegisteredOpenTeam.user == tuser.key)
return q.get()
else:
return None
#Check if a user is registered as an institution
def isInstitution(self, tuser):
if tuser:
q = RegisteredInstitution.query(ancestor=self.key)
q = q.filter(RegisteredInstitution.user == tuser.key)
return q.get()
else:
return None
#Teams registered
def teams(self):
return RegisteredOpenTeam.query(ancestor=self.key)
#Institutions registered
def institutions(self):
return RegisteredInstitution.query(ancestor=self.key)
#Return the total number of teams, independent + institutional
def totalTeamCount(self):
nTeams = self.teams().count(limit=1000)
for i in self.institutions():
nTeams = nTeams + i.teams().count(limit=1000)
return nTeams
#Return the total number of judges, independent + institutional
def totalJudgeCount(self):
nJudges = self.indyJudges().count(limit=1000)
for i in self.institutions():
nJudges = nJudges + i.judges().count(limit=1000)
return nJudges
"""Superclass for registered judges, teams, institutions"""
class RegisteredEntity(polymodel.PolyModel):
user = ndb.KeyProperty(kind='TUser')
def tournament(self):
return self.key.parent().parent()
def email(self):
return self.user.get().preferredEmail()
class RegisteredIndependentJudge(RegisteredEntity):
"""Models a participant in the tournament"""
name = ndb.ComputedProperty(lambda self: self.user.get().full_name)
phone = ndb.ComputedProperty(lambda self: self.user.get().phone)
cv_public = ndb.ComputedProperty(lambda self: self.user.get().public_profile)
def prefs(self):
return RegisteredPreferences.query(ancestor=self.key).get()
#Check if the user is authorised to modify
def authorised(self, tuser):
#Check if they own the object
if self.user == tuser.key:
return True
#Check if they own the tournament
elif tuser.key in self.key.parent().parent().get().owner:
return True
else:
return False
class RegisteredOpenTeam(RegisteredEntity):
"""Models an open team in the tournament"""
leadName = ndb.ComputedProperty(lambda self: self.user.get().full_name)
phone = ndb.ComputedProperty(lambda self: self.user.get().phone)
teamName = ndb.StringProperty()
sp1Key = ndb.KeyProperty(kind='TUser')
sp2Key = ndb.KeyProperty(kind='TUser')
sp1Name = ndb.StringProperty()
sp2Name = ndb.StringProperty()
#Speaker names accounting for linked accounts
def speaker1(self):
if self.sp1Key:
return self.sp1Key.get().full_name
else:
return self.sp1Name
def speaker2(self):
if self.sp2Key:
return self.sp2Key.get().full_name
else:
return self.sp2Name
sp1ESL = ndb.BooleanProperty()
sp2ESL = ndb.BooleanProperty()
sp1Novice = ndb.BooleanProperty()
sp2Novice = ndb.BooleanProperty()
def authorised(self, tuser):
if self.user == tuser.key:
return True
elif tuser.key in self.key.parent().parent().get().owner:
return True
else:
return False
def institutionName(self):
return "Composite"
#Link a speaker to a registered team
def linkSpeaker(self, speaker, dID):
key = ndb.Key('TUser', int(dID))
debater = key.get()
if debater.basic_info:
if speaker == 1:
self.sp1Key = key
elif speaker == 2:
self.sp2Key = key
else:
return False
self.put()
return True
else:
return False
    def linkSpeaker2(self, dID):
        key = ndb.Key('TUser', int(dID))
        debater = key.get()
        if debater.basic_info:
            self.sp2Key = key
            return True
        else:
            return False
    def linkSpeaker1(self, dID):
        key = ndb.Key('TUser', int(dID))
        debater = key.get()
        if debater.basic_info:
            self.sp1Key = key
            return True
        else:
            return False
class RegisteredInstitution(RegisteredEntity):
"""Models an institution registered for the tournment"""
name = ndb.StringProperty()
leadName = ndb.ComputedProperty(lambda self: self.user.get().full_name)
phone = ndb.ComputedProperty(lambda self: self.user.get().phone)
#Return a query of all of the teams attached to the institution
def teams(self):
return InstitutionTeam.query(ancestor=self.key)
#Return a query of all of the judges attached to the institution
def judges(self):
return InstitutionJudge.query(ancestor=self.key)
#Check whether they are authorised to edit
def authorised(self, tuser):
return self.user == tuser.key or tuser.key in self.key.parent().parent().get().owner
#Delete the institution along with its teams and judges
def destroy(self):
for judge in self.judges():
judge.key.delete()
for team in self.teams():
team.key.delete()
self.key.delete()
class InstitutionTeam(ndb.Model):
"""A team attached to an institution"""
teamName = ndb.StringProperty()
sp1Name = ndb.StringProperty()
sp2Name = ndb.StringProperty()
sp1Key = ndb.KeyProperty(kind='TUser')
sp2Key = ndb.KeyProperty(kind='TUser')
sp1ESL = ndb.BooleanProperty()
sp2ESL = ndb.BooleanProperty()
sp1Novice = ndb.BooleanProperty()
sp2Novice = ndb.BooleanProperty()
#The user of the institution responsible for the team
@property
def user(self):
return self.key.parent().get().user
#Speaker names accounting for linked accounts
def speaker1(self):
if self.sp1Key:
return self.sp1Key.get().full_name
else:
return self.sp1Name
def speaker2(self):
if self.sp2Key:
return self.sp2Key.get().full_name
else:
return self.sp2Name
#Link a speaker to a registered team
def linkSpeaker(self, speaker, dID):
key = ndb.Key('TUser', int(dID))
debater = key.get()
if debater.basic_info:
if speaker == 1:
self.sp1Key = key
elif speaker == 2:
self.sp2Key = key
else:
return False
self.put()
return True
else:
return False
    def linkSpeaker2(self, dID):
        key = ndb.Key('TUser', int(dID))
        debater = key.get()
        if debater.basic_info:
            self.sp2Key = key
            return True
        else:
            return False
    def linkSpeaker1(self, dID):
        key = ndb.Key('TUser', int(dID))
        debater = key.get()
        if debater.basic_info:
            self.sp1Key = key
            return True
        else:
            return False
def authorised(self, tuser):
return self.key.parent().get().user == tuser.key or tuser.key in self.key.parent().parent().parent().get().owner
def institutionName(self):
return self.key.parent().get().name
class InstitutionJudge(ndb.Model):
"""A judge attached to an institution"""
name = ndb.StringProperty()
#Check if the user is authorised to modify
def authorised(self, tuser):
#Check if they own the object
if self.key.parent().get().user == tuser.key:
return True
#Check if they are the tournament owner
elif tuser.key in self.key.parent().parent().parent().get().owner:
return True
else:
return False
class RegisteredPreferences(ndb.Model):
"""The preferences of a registered participant"""
vegetarian = ndb.BooleanProperty()
glutenfree = ndb.BooleanProperty()
vegan = ndb.BooleanProperty()
halal = ndb.BooleanProperty()
kosher = ndb.BooleanProperty()
special = ndb.StringProperty()
class PerfSpeakerRecord(ndb.Model):
"""The performance record of a speaker at a tournament"""
tournament = ndb.StringProperty()
startDate = ndb.DateProperty()
teamRank = ndb.IntegerProperty()
averageSpeaks = ndb.FloatProperty()
speakerRank = ndb.IntegerProperty()
champion = ndb.BooleanProperty()
finalist = ndb.BooleanProperty()
semifinalist = ndb.BooleanProperty()
quarterfinalist = ndb.BooleanProperty()
octofinalist = ndb.BooleanProperty()
doubleoctofinalist = ndb.BooleanProperty()
ESLBreak = ndb.BooleanProperty()
ESLChampion = ndb.BooleanProperty()
EFLBreak = ndb.BooleanProperty()
EFLChampion = ndb.BooleanProperty()
NoviceBreak = ndb.BooleanProperty()
NoviceChampion = ndb.BooleanProperty()
isWin = ndb.BooleanProperty()
isBreak = ndb.BooleanProperty()
class PerfJudgeRecord(ndb.Model):
"""The performance record of a judge at a tournament"""
tournament = ndb.StringProperty()
startDate = ndb.DateProperty()
chair = ndb.BooleanProperty()
broke = ndb.BooleanProperty()
outroundChair = ndb.BooleanProperty()
CA = ndb.BooleanProperty()
DCA = ndb.BooleanProperty()
equity = ndb.BooleanProperty()
isAchievement = ndb.BooleanProperty()
def isCAShip(self):
        return self.DCA or self.CA
| sarrionandia/tournatrack | models.py | Python | apache-2.0 | 12,570 | [
"Brian"
] | 0ab2cf0f25cb4e09a3decab0918b2d96a3c1535d308e8a7897aaa22f14f89fa0 |
#!/usr/bin/env python
import askap.analysis.evaluation
import numpy as np
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import math
import scipy.special
from askap.analysis.evaluation.readData import *
import askap.analysis.evaluation.modelcomponents as models
from askap.analysis.evaluation.sourceSelection import *
import pyfits
import pywcs
import os
import logging
from optparse import OptionParser
import askap.parset as parset
def labelPlot(xlab, ylab, title,textsize):
plt.tick_params(labelsize=textsize)
plt.xlabel(xlab,fontsize=textsize)
plt.ylabel(ylab,fontsize=textsize)
plt.title(title,fontsize=textsize)
#############
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c","--config", dest="inputfile", default="", help="Input parameter file [default: %default]")
(options, args) = parser.parse_args()
if(options.inputfile==''):
inputPars = parset.ParameterSet()
elif(not os.path.exists(options.inputfile)):
logging.warning("Config file %s does not exist! Using default parameter values."%options.inputfile)
inputPars = parset.ParameterSet()
else:
inputPars = parset.ParameterSet(options.inputfile).Eval
threshImageName=inputPars.get_value('thresholdImage','detectionThreshold.i.clean.fits')
noiseImageName=inputPars.get_value('noiseImage','noiseMap.i.clean.fits')
snrImageName=inputPars.get_value('snrImage','snr.i.clean.fits')
skymodelCatalogue=inputPars.get_value('refCatalogue','skyModel-catalogue.txt')
skymodelCatType = inputPars.get_value('refCatalogueType','Selavy')
skymodelOrigCat=inputPars.get_value('origCatalogue','')
skymodelOrigCatIsPrecessed=inputPars.get_value('origCatalogueIsPrecessed','false')
sourceCatalogue=inputPars.get_value('sourceCatalogue','selavy-fitResults.txt')
sourceCatType = inputPars.get_value('sourceCatalogueType','Selavy')
matchfile = inputPars.get_value('matchfile','matches.txt')
if not os.path.exists(threshImageName):
print "Threshold image %s does not exist. Exiting."%threshImageName
exit(0)
if not os.path.exists(noiseImageName):
print "Noise image %s does not exist. Exiting."%noiseImageName
exit(0)
    if not os.path.exists(snrImageName):
print "SNR image %s does not exist. Exiting."%snrImageName
exit(0)
threshim=pyfits.open(threshImageName)
threshmapFull=threshim[0].data
goodpixels=threshmapFull>0.
threshmap = threshmapFull[goodpixels]
threshHeader = threshim[0].header
threshWCS = pywcs.WCS(threshHeader)
threshim.close()
# Tools used to determine whether a given missed reference source should be included
selector = sourceSelector(inputPars)
# this one is used for the original catalogue when it has not been precessed.
selectorUnprecessed = sourceSelector(inputPars)
selectorUnprecessed.setWCSreference(0.,0.)
noiseim=pyfits.open(noiseImageName)
noisemapFull=noiseim[0].data
noisemap = noisemapFull[goodpixels]
noiseim.close()
snrim = pyfits.open(snrImageName)
snrmapFull=snrim[0].data
snrmap = snrmapFull[goodpixels]
snrim.close()
#########################################
# Noise map distribution
plt.figure(num=2, figsize=(12.,12.), dpi=72)
plt.subplots_adjust(wspace=0.4, hspace=0.6)
plt.subplot(321)
plt.hist(noisemap,bins=50)
labelPlot('Pixel value','Count','Noise map histogram','x-small')
plt.subplot(322)
plt.hist(noisemap,bins=50,log=True)
labelPlot('Pixel value','Count','Noise map histogram','x-small')
#########################################
# Threshold map distribution
plt.subplot(323)
plt.hist(threshmap,bins=50)
labelPlot('Pixel value','Count','Threshold map histogram','x-small')
plt.subplot(324)
plt.hist(threshmap,bins=50,log=True)
labelPlot('Pixel value','Count','Threshold map histogram','x-small')
#########################################
# SNR map distribution
plt.subplot(325)
plt.hist(snrmap[abs(snrmap)<100],bins=50)
labelPlot('Pixel value','Count','SNR map histogram','x-small')
plt.subplot(326)
plt.hist(snrmap[abs(snrmap)<100],bins=50,log=True)
labelPlot('Pixel value','Count','SNR map histogram','x-small')
plt.savefig('histograms.png')
#########################################
    # Cumulative counts of pixels above given SNR thresholds
numPixAboveSNR=[]
numPixBelowNegSNR=[]
logSNRlevel=np.arange(5*math.log10(20.)+1)/5.
snrLevel=10**logSNRlevel
for snr in snrLevel:
numPixAboveSNR.append(snrmap[snrmap>snr].size)
numPixBelowNegSNR.append(snrmap[snrmap<-snr].size)
numPixAboveSNR=np.array(numPixAboveSNR,dtype=float)
numPixBelowNegSNR=np.array(numPixBelowNegSNR,dtype=float)
plt.figure(num=3,figsize=(8,8),dpi=72)
plt.loglog()
plt.plot(snrLevel,numPixAboveSNR,'b-',lw=3,label='Positive signal')
plt.plot(snrLevel,numPixBelowNegSNR,'r-',lw=3,label='Negative signal')
# Want to plot the theoretical curve expected from Gaussian noise.
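    # For zero-mean, unit-variance Gaussian noise the one-sided tail probability is
    # P(Z > x) = 0.5*erfc(x/sqrt(2)), so the expected number of pixels above an SNR
    # of x is snrmap.size * 0.5 * erfc(x/sqrt(2)), which is what the loop below computes.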
x=np.arange(1000)*10./1000
y=np.zeros(x.size)
for i in range(x.size):
y[i] = 0.5 * snrmap.size * scipy.special.erfc(x[i]/math.sqrt(2.))
plt.plot(x,y,'g:',label='Gaussian noise')
plt.xlim(0.8,30)
plt.ylim(1,1.e7)
labelPlot('Signal-to-noise ratio','Number of pixels exceeding SNR','SNR pixel distribution','medium')
plt.legend()
plt.savefig('snrCounts.png')
#####################################
# source counts
# fin=open(sourceCatalogue)
# sourcelist=[]
# for line in fin:
# if line[0]!='#':
# sourcelist.append(models.SelavyObject(line))
# fin.close()
sourcecat = readCat(sourceCatalogue,sourceCatType)
sourcelist = sourcecat.values()
# list of log10(flux) points - the middle of the bins
minFlux = inputPars.get_value('sourceCounts.minFlux',1.e-4)
maxFlux = inputPars.get_value('sourceCounts.maxFlux',10.)
logbinwidth=inputPars.get_value('sourceCounts.logBinWidth',0.2)
logfluxpts=np.arange((math.log10(maxFlux)-math.log10(minFlux))/logbinwidth)*logbinwidth + math.log10(minFlux)
fluxpts=10**logfluxpts
fluxbinwidths=fluxpts*10**(logbinwidth/2.)-fluxpts/10**(logbinwidth/2.)
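    # The bins are centred on fluxpts; in linear flux each bin spans
    # S*10**(logbinwidth/2.) - S/10**(logbinwidth/2.), which is what fluxbinwidths holds.
    # Note that the 'loc' bin index computed further down, int((log10(S)+4+0.1)*5),
    # assumes the default minFlux=1e-4 and logBinWidth=0.2.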
counts=np.zeros(fluxpts.size)
countsPerArea = np.zeros(fluxpts.size)
countsMatch=np.zeros(fluxpts.size)
countsMatchPerArea = np.zeros(fluxpts.size)
    # area of a single pixel (converted to steradians below if the map units are degrees)
pixelarea=abs(threshWCS.wcs.cdelt[0:2].prod())
pixelunit=threshWCS.wcs.cunit[0].strip()
if pixelunit == 'deg':
pixelarea = pixelarea * (math.pi/180.)**2
fullFieldArea = threshmap.size * pixelarea
# get list of matches
fin=open(matchfile)
matchedSources=[]
for line in fin:
matchedSources.append(line.split()[1])
matchedSources=np.array(matchedSources)
for source in sourcelist:
flux=source.flux()
loc=int((math.log10(flux)+4+0.1)*5)
if loc>=0 and loc<fluxpts.size:
counts[loc] = counts[loc]+1
sourceDetArea = pixelarea * threshmap[threshmap<source.peak()].size
#sourceDetArea = fullFieldArea
countsPerArea[loc] = countsPerArea[loc] + 1./sourceDetArea
if (matchedSources==source.id).any():
countsMatch[loc] = countsMatch[loc] + 1
countsMatchPerArea[loc] = countsMatchPerArea[loc] + 1./sourceDetArea
##########
# Sky model comparison
# fin=open(skymodelCatalogue)
# skymodellist=[]
# for line in fin:
# if line[0]!='#':
# skymodellist.append(models.SelavyObject(line))
# fin.close()
skymodelCat = readCat(skymodelCatalogue,skymodelCatType)
skymodellist = skymodelCat.values()
countsSM=np.zeros(fluxpts.size)
countsPerAreaSM = np.zeros(fluxpts.size)
selector.setFluxType("peak")
for source in skymodellist:
if selector.isGood(source):
flux=source.flux()
loc=int((math.log10(flux)+4+0.1)*5)
sourceDetArea = pixelarea * threshmap[threshmap<source.peak()].size
#sourceDetArea = fullFieldArea
if loc >= 0 and loc < countsSM.size and sourceDetArea > 0.:
countsSM[loc] = countsSM[loc]+1
countsPerAreaSM[loc] = countsPerAreaSM[loc] + 1./sourceDetArea
#########
# original sky model comparison, if requested
if not skymodelOrigCat == '':
fin=open(skymodelOrigCat)
origskymodellist=[]
for line in fin:
if not line[0] == '#':
origskymodellist.append(models.FullStokesS3SEXObject(line))
fin.close()
countsSMorig=np.zeros(fluxpts.size)
countsPerAreaSMorig = np.zeros(fluxpts.size)
selector.setFluxType("int")
for source in origskymodellist:
            if (skymodelOrigCatIsPrecessed and selectorUnprecessed.isGood(source)) or (not skymodelOrigCatIsPrecessed and selector.isGood(source)):
flux=source.flux()
loc=int((math.log10(flux)+4+0.1)*5)
#sourceDetArea = fullFieldArea
sourceDetArea = pixelarea * threshmap[threshmap<source.flux()].size
if loc >= 0 and loc < countsSMorig.size and sourceDetArea > 0. :
countsSMorig[loc] = countsSMorig[loc]+1
countsPerAreaSMorig[loc] = countsPerAreaSMorig[loc] + 1./sourceDetArea
plt.figure(num=4,figsize=(8.,8.),dpi=72)
#plt.subplot(428)
plt.loglog()
shift=1.
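    # In the curves below, n is the Euclidean-normalised differential count
    # S^2.5 dN/dS per steradian, so a non-evolving Euclidean population plots as a flat line.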
if not skymodelOrigCat == '':
n=countsPerAreaSMorig * fluxpts**2.5 / fluxbinwidths
plt.plot(fluxpts*shift,n,'g-',label='_nolegend_')
plt.plot(fluxpts*shift,n,'go',label='Original Sky model')
plt.errorbar(fluxpts*shift,n,yerr=np.sqrt(countsSMorig)*fluxpts**2.5/fullFieldArea,xerr=None,fmt='g-',label='_nolegend_')
n=countsPerAreaSM * fluxpts**2.5 / fluxbinwidths
# for i in range(len(fluxpts)):
# print "%d: %f %6d %f %f %d"%(i,fluxpts[i],countsSM[i],countsPerAreaSM[i],n[i],threshmap[threshmap<fluxpts[i]].size)
plt.plot(fluxpts/shift,n,'r-',label='_nolegend_')
plt.plot(fluxpts/shift,n,'ro',label='Sky model')
plt.errorbar(fluxpts/shift,n,yerr=np.sqrt(countsSM)*fluxpts**2.5/fullFieldArea,xerr=None,fmt='r-',label='_nolegend_')
n=countsPerArea * fluxpts**2.5 / fluxbinwidths
plt.plot(fluxpts,n,'b-',label='_nolegend_')
plt.plot(fluxpts,n,'bo',label='Detected Components')
plt.errorbar(fluxpts,n,yerr=np.sqrt(counts)*fluxpts**2.5/fullFieldArea,xerr=None,fmt='b-',label='_nolegend_')
n=countsMatchPerArea * fluxpts**2.5 / fluxbinwidths
plt.plot(fluxpts,n,'g-',label='_nolegend_')
plt.plot(fluxpts,n,'go',label='Detected & Matched Components')
plt.errorbar(fluxpts,n,yerr=np.sqrt(countsMatch)*fluxpts**2.5/fullFieldArea,xerr=None,fmt='g-',label='_nolegend_')
# This will plot the analytic polynomial fit to the 1.4GHz source
# counts from Hopkins et al (2003) (AJ 125, 465)
showHopkinsPoly=inputPars.get_value("sourceCounts.showHopkinsPolynomial",False)
if showHopkinsPoly:
hopkinsPolyCoeffs=[-0.008,0.057,-0.121,-0.049,0.376,0.508,0.859]
hopkinsPoly=np.poly1d(hopkinsPolyCoeffs)
hopkins=10**hopkinsPoly(np.log10(fluxpts*1000.))
plt.plot(fluxpts,hopkins,'k:',label='Hopkins et al (2003)')
plt.ylim(1.e-1,1.e5)
labelPlot('S [Jy]', r'$S^{5/2}n(S)$ [ Jy$^{3/2}$ sr$^{-1}$ ]','Differential source counts','medium')
plt.legend(loc='best')
plt.savefig('sourceCounts.png')
if not skymodelOrigCat == '':
print "Source counts: Numbers in each bin, for Components | Matched components | Skymodel | Original sky model"
else:
print "Source counts: Numbers in each bin, for Components | Matched components | Skymodel "
for i in range(len(fluxpts)):
if not skymodelOrigCat == '':
print "%f: %6d %6d %6d %6d"%(fluxpts[i],counts[i],countsMatch[i],countsSM[i],countsSMorig[i])
else:
print "%f: %6d %6d %6d"%(fluxpts[i],counts[i],countsMatch[i],countsSM[i])
plt.close()
| ATNF/askapsdp | Code/Components/Analysis/evaluation/current/scripts/imageEval.py | Python | gpl-2.0 | 12,197 | [
"Gaussian"
] | 7c1892579a08d810bf80e9cef461cc6b158f929c5aedd302da885ab6cb44ca55 |
"""Test Payment for Family Camp Bookings.
Usage:
test_pay_for_camp.py <email> <adults> <children> <infants>
Options:
-d,--debug Turn on debug output.
-h,--help Show this screen.
--version Show version.
"""
from splinter import Browser
from time import sleep
from docopt import docopt
URL = "http://www.7thlichfield.org.uk/family-camp/"
PP_EMAIL = "[email protected]"
PP_PASSWD = "ZdIcQM9L2Qfl"
def interact():
import code
code.InteractiveConsole(locals=globals()).interact()
def pay(email, adults, children, infants):
with Browser() as browser:
browser.visit(URL)
browser.is_element_present_by_xpath(
"//input[@value='Go to PayPal checkout']")
browser.fill('custom', email)
browser.fill('quantity_1', adults)
browser.fill('quantity_2', children)
browser.fill('quantity_3', infants)
browser.find_by_xpath(
"//input[@value='Go to PayPal checkout']").click()
browser.find_by_xpath('//input[@name="login_button"]').click()
sleep(3)
browser.fill('login_email', PP_EMAIL)
browser.fill('login_password', PP_PASSWD)
browser.find_by_xpath('//input[@id="submitLogin"]').click()
browser.is_element_present_by_xpath('//input[@value="Pay Now"]',
wait_time=10)
browser.find_by_xpath('//input[@value="Pay Now"]').click()
browser.is_element_present_by_xpath('//strong[starts-with(.,"You just completed")]',
wait_time=10)
if __name__ == '__main__':
args = docopt(__doc__, version='1.0')
pay(args['<email>'],
args['<adults>'],
args['<children>'],
args['<infants>'])
| hippysurfer/family-camp | family_camp/test/test_pay_for_camp.py | Python | mit | 1,811 | [
"VisIt"
] | 959e6ff67d5999c2d039ed6f2788f9b26624e78d984af1e04f558628706ed35d |
import os, sys, shutil
from pymatgen.io.vasp import Poscar, Kpoints, Potcar, VaspInput
from pymatgen import Structure
from pymatgen.apps.borg.hive import SimpleVaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
from ctypes import *
from mpi4py import MPI
from timeit import default_timer as timer
import subprocess
import time
import numpy as np
class test_runner(object):
def submit(self, structure, output_dir, seldyn_arr=None):
poscar = Poscar(structure=structure,selective_dynamics=seldyn_arr)
try:
os.mkdir(output_dir)
except FileExistsError:
pass
poscar.write_file(output_dir+"/POSCAR.vasp")
return 0, structure
class vasp_runner(object):
def __init__(self, base_input_dir, path_to_vasp, nprocs_per_vasp, comm, perturb=0):
self.base_vasp_input = VaspInput.from_directory(base_input_dir)
self.path_to_vasp = path_to_vasp
self.nprocs_per_vasp = nprocs_per_vasp
self.comm = comm
self.vasp_run = vasp_run_mpispawn(path_to_vasp, nprocs_per_vasp, comm)
self.drone = SimpleVaspToComputedEntryDrone(inc_structure=True)
self.queen = BorgQueen(self.drone)
self.perturb = perturb
def submit(self, structure, output_dir, seldyn_arr=None):
if self.perturb:
structure.perturb(self.perturb)
poscar = Poscar(structure=structure,selective_dynamics=seldyn_arr)
vaspinput = self.base_vasp_input
vaspinput.update({'POSCAR':poscar})
self.vasp_run.submit(vaspinput, output_dir)
#queen = BorgQueen(self.drone)
self.queen.serial_assimilate(output_dir)
results = self.queen.get_data()[-1]
return np.float64(results.energy), results.structure
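# Minimal usage sketch (illustrative values only -- the input directory, binary path,
# process count and output directory below are assumptions, not part of this module):
#   comm = MPI.COMM_WORLD
#   runner = vasp_runner('vasp_inputs/', '/path/to/vasp', 4, comm)
#   energy, relaxed_structure = runner.submit(structure, 'run0000')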
class vasp_runner_multistep(object):
def __init__(self, base_input_dirs, path_to_vasp, nprocs_per_vasp, comm, perturb=0):
self.vasp_runners = []
assert len(base_input_dirs) > 1
self.vasp_runners.append(vasp_runner(base_input_dirs[0], path_to_vasp, nprocs_per_vasp, comm, perturb))
for i in range(1,len(base_input_dirs)):
self.vasp_runners.append(vasp_runner(base_input_dirs[i], path_to_vasp, nprocs_per_vasp, comm, perturb=0))
def submit(self, structure, output_dir, seldyn_arr=None):
energy, newstructure = self.vasp_runners[0].submit(structure, output_dir, seldyn_arr)
for i in range(1, len(self.vasp_runners)):
energy, newstructure = self.vasp_runners[i].submit(newstructure, output_dir, seldyn_arr)
return energy, newstructure
def submit_bulkjob(vasprundirs, path_to_vasp, n_mpiprocs, n_ompthreads):
joblist = open("joblist.txt", "w")
if n_ompthreads != 1:
progtype = "H"+str(n_ompthreads)
else:
progtype = "M"
for vasprundir in vasprundirs:
joblist.write(
";".join([path_to_vasp, str(n_mpiprocs), progtype, vasprundir]) + "\n")
stdout = open("stdout.log", "w")
stderr = open("stderr.log", "w")
stdin = open(os.devnull, "r")
joblist.flush()
start = timer()
p = subprocess.Popen("bulkjob ./joblist.txt", stdout=stdout, stderr=stderr, stdin=stdin,
shell=True)
exitcode = p.wait()
end = timer()
print("it took ",end-start," secs. to start vasp and finish")
sys.stdout.flush()
return exitcode
class vasp_run_mpibulkjob:
def __init__(self, path_to_spawn_ready_vasp, nprocs, comm):
self.path_to_vasp = path_to_spawn_ready_vasp
self.nprocs = nprocs
self.comm = comm
self.commsize = comm.Get_size()
self.commrank = comm.Get_rank()
def submit(self, VaspInput, output_dir):
VaspInput.write_input(output_dir=output_dir)
vasprundirs = self.comm.gather(output_dir, root=0)
exitcode = 1
if self.commrank == 0:
exitcode = np.array([submit_bulkjob(
vasprundirs, self.path_to_vasp, self.nprocs, 1)])
for i in range(1, self.commsize):
self.comm.Isend([exitcode, MPI.INT], dest=i, tag=i)
else:
exitcode = np.array([0])
while not self.comm.Iprobe(source=0, tag=self.commrank):
time.sleep(0.2)
self.comm.Recv([exitcode, MPI.INT], source=0, tag=self.commrank)
return exitcode[0]
class vasp_run_mpispawn:
def __init__(self, path_to_spawn_ready_vasp, nprocs, comm):
self.path_to_vasp = path_to_spawn_ready_vasp
self.nprocs = nprocs
self.comm = comm
self.commsize = comm.Get_size()
self.commrank = comm.Get_rank()
commworld = MPI.COMM_WORLD
self.worldrank = commworld.Get_rank()
def submit(self, VaspInput, output_dir, rerun=2):
VaspInput.write_input(output_dir=output_dir)
#cwd = os.getcwd()
#os.chdir(output_dir)
# Barrier so that spawn is atomic between processes.
# This is to make sure that vasp processes are spawned one by one according to
# MPI policy (hopefully on adjacent nodes)
# (might be MPI implementation dependent...)
#for i in range(self.commsize):
# self.comm.Barrier()
# if i == self.commrank:
failed_dir = []
vasprundirs = self.comm.gather(output_dir,root=0)
#print(self.commrank)
if self.commrank == 0:
start = timer()
commspawn = [MPI.COMM_SELF.Spawn(self.path_to_vasp, #/home/issp/vasp/vasp.5.3.5/bin/vasp",
args=[vasprundir,],
maxprocs=self.nprocs) for vasprundir in vasprundirs]
end = timer()
print("rank ",self.worldrank," took ", end-start, " to spawn")
sys.stdout.flush()
start = timer()
exitcode = np.array(0, dtype=np.intc)
i = 0
for comm in commspawn:
comm.Bcast([exitcode, MPI.INT], root=0)
comm.Disconnect()
if exitcode != 0:
failed_dir.append(vasprundirs[i])
i = i + 1
end = timer()
print("rank ", self.worldrank, " took ", end-start, " for vasp execution")
if len(failed_dir) != 0:
print("vasp failed in directories: \n "+"\n".join(failed_dir))
sys.stdout.flush()
if rerun == 0:
MPI.COMM_WORLD.Abort()
self.comm.Barrier()
# Rerun if VASP failed
failed_dir = self.comm.bcast(failed_dir,root=0)
if len(failed_dir) != 0 and rerun == 1:
if self.commrank == 0:
print("falling back to damped algorithm")
poscar = Poscar.from_file(output_dir+"/CONTCAR")
VaspInput.update({'POSCAR':poscar})
incar = VaspInput.get('INCAR')
incar.update({'IBRION':3,'POTIM':0.2})
VaspInput.update({'INCAR':incar})
rerun -= 1
self.submit(VaspInput, output_dir, rerun)
elif len(failed_dir) != 0 and rerun > 0:
if self.commrank == 0:
print("rerunning with copied CONTCARS")
poscar = Poscar.from_file(output_dir+"/CONTCAR")
VaspInput.update({'POSCAR':poscar})
rerun -= 1
self.submit(VaspInput, output_dir, rerun)
#commspawn = MPI.COMM_SELF.Spawn(self.path_to_vasp, #/home/issp/vasp/vasp.5.3.5/bin/vasp",
# args=[output_dir],
# maxprocs=self.nprocs)
# Spawn is too slow, can't afford to make it atomic
#commspawn = MPI.COMM_SELF.Spawn(self.path_to_vasp, #/home/issp/vasp/vasp.5.3.5/bin/vasp",
# args=[output_dir,],
# maxprocs=self.nprocs)
# sendbuffer = create_string_buffer(output_dir.encode('utf-8'),255)
# commspawn.Bcast([sendbuffer, 255, MPI.CHAR], root=MPI.ROOT)
#commspawn.Barrier()
#commspawn.Disconnect()
#os.chdir(cwd)
return 0
| skasamatsu/py_mc | py_mc/applications/latgas_abinitio_interface/run_vasp_mpi.py | Python | gpl-3.0 | 8,265 | [
"VASP",
"pymatgen"
] | 8a7cba9c6880f22db1fb612260b78f6e085d19ce30b4bfab005881dfc08d4b00 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Zsolt Foldvari
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2013 Vassilii Khachaturov
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""PS/PDF output generator based on Cairo.
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
import logging
#-------------------------------------------------------------------------
#
# GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import Pango, PangoCairo
import cairo
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
import gramps.plugins.lib.libcairodoc as libcairodoc
from gramps.gen.plug.docgen import INDEX_TYPE_ALP, INDEX_TYPE_TOC
from gramps.gen.errors import ReportError
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
LOG = logging.getLogger(".cairodoc")
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# resolution
DPI = 72.0
#------------------------------------------------------------------------
#
# CairoDocgen class
#
#------------------------------------------------------------------------
class CairoDocgen(libcairodoc.CairoDoc):
"""Render the document into a file using a Cairo surface.
"""
def create_cairo_surface(self, fobj, width_in_points, height_in_points):
# See
# http://cairographics.org/documentation/pycairo/3/reference/surfaces.html#class-pssurface-surface
# for the arg semantics.
raise "Missing surface factory override!!!"
def run(self):
"""Create the output file.
The derived class overrides EXT and create_cairo_surface
"""
# get paper dimensions
paper_width = self.paper.get_size().get_width() * DPI / 2.54
paper_height = self.paper.get_size().get_height() * DPI / 2.54
page_width = round(self.paper.get_usable_width() * DPI / 2.54)
page_height = round(self.paper.get_usable_height() * DPI / 2.54)
left_margin = self.paper.get_left_margin() * DPI / 2.54
top_margin = self.paper.get_top_margin() * DPI / 2.54
# create cairo context and pango layout
filename = self._backend.filename
# Cairo can't reliably handle unicode filenames on Linux or
# Windows, so open the file for it.
with open(filename, 'wb') as fd:
try:
surface = self.create_cairo_surface(fd, paper_width,
paper_height)
surface.set_fallback_resolution(300, 300)
cr = cairo.Context(surface)
fontmap = PangoCairo.font_map_new()
fontmap.set_resolution(DPI)
pango_context = fontmap.create_context()
options = cairo.FontOptions()
options.set_hint_metrics(cairo.HINT_METRICS_OFF)
PangoCairo.context_set_font_options(pango_context, options)
layout = Pango.Layout(pango_context)
PangoCairo.update_context(cr, pango_context)
# paginate the document
self.paginate_document(layout, page_width, page_height,
DPI, DPI)
body_pages = self._pages
# build the table of contents and alphabetical index
toc_page = None
index_page = None
toc = []
index = {}
for page_nr, page in enumerate(body_pages):
if page.has_toc():
toc_page = page_nr
if page.has_index():
index_page = page_nr
for mark in page.get_marks():
if mark.type == INDEX_TYPE_ALP:
if mark.key in index:
if page_nr + 1 not in index[mark.key]:
index[mark.key].append(page_nr + 1)
else:
index[mark.key] = [page_nr + 1]
elif mark.type == INDEX_TYPE_TOC:
toc.append([mark, page_nr + 1])
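                # toc collects [IndexMark, 1-based page number] pairs in document order;
                # index maps each key to its list of 1-based page numbers in increasing order.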
# paginate the table of contents
rebuild_required = False
if toc_page is not None:
toc_pages = self.__generate_toc(layout, page_width,
page_height, toc)
offset = len(toc_pages) - 1
if offset > 0:
self.__increment_pages(toc, index, toc_page, offset)
rebuild_required = True
else:
toc_pages = []
# paginate the index
if index_page is not None:
index_pages = self.__generate_index(layout, page_width,
page_height, index)
offset = len(index_pages) - 1
if offset > 0:
self.__increment_pages(toc, index, index_page, offset)
rebuild_required = True
else:
index_pages = []
# rebuild the table of contents and index if required
if rebuild_required:
if toc_page is not None:
toc_pages = self.__generate_toc(layout, page_width,
page_height, toc)
if index_page is not None:
index_pages = self.__generate_index(layout, page_width,
page_height, index)
# render the pages
if toc_page is not None:
body_pages = body_pages[:toc_page] + toc_pages + \
body_pages[toc_page+1:]
if index_page is not None:
body_pages = body_pages[:index_page] + index_pages + \
body_pages[index_page+1:]
self._pages = body_pages
for page_nr in range(len(self._pages)):
cr.save()
cr.translate(left_margin, top_margin)
self.draw_page(page_nr, cr, layout,
page_width, page_height,
DPI, DPI)
cr.show_page()
cr.restore()
# close the surface (file)
surface.finish()
except IOError as msg:
errmsg = "%s\n%s" % (_("Could not create %s") % filename, msg)
raise ReportError(errmsg)
except Exception as err:
errmsg = "%s\n%s" % (_("Could not create %s") % filename, err)
raise ReportError(errmsg)
def __increment_pages(self, toc, index, start_page, offset):
"""
Increment the page numbers in the table of contents and index.
"""
for n, value in enumerate(toc):
page_nr = toc[n][1]
toc[n][1] = page_nr + (offset if page_nr > start_page else 0)
for key, value in index.items():
index[key] = [page_nr + (offset if page_nr > start_page else 0)
for page_nr in value]
def __generate_toc(self, layout, page_width, page_height, toc):
"""
Generate the table of contents.
"""
self._doc = libcairodoc.GtkDocDocument()
self._active_element = self._doc
self._pages = []
write_toc(toc, self)
self.paginate_document(layout, page_width, page_height, DPI, DPI)
return self._pages
def __generate_index(self, layout, page_width, page_height, index):
"""
Generate the index.
"""
self._doc = libcairodoc.GtkDocDocument()
self._active_element = self._doc
self._pages = []
write_index(index, self)
self.paginate_document(layout, page_width, page_height, DPI, DPI)
return self._pages
def write_toc(toc, doc):
"""
Write the table of contents.
"""
if not toc:
return
doc.start_paragraph('TOC-Title')
doc.write_text(doc.toc_title)
doc.end_paragraph()
doc.start_table('toc', 'TOC-Table')
for mark, page_nr in toc:
doc.start_row()
doc.start_cell('TOC-Cell')
if mark.level == 1:
style_name = "TOC-Heading1"
elif mark.level == 2:
style_name = "TOC-Heading2"
else:
style_name = "TOC-Heading3"
doc.start_paragraph(style_name)
doc.write_text(mark.key)
doc.end_paragraph()
doc.end_cell()
doc.start_cell('TOC-Cell')
doc.start_paragraph(style_name)
doc.write_text(str(page_nr))
doc.end_paragraph()
doc.end_cell()
doc.end_row()
doc.end_table()
def write_index(index, doc):
"""
Write the alphabetical index.
"""
if not index:
return
doc.start_paragraph('IDX-Title')
doc.write_text(doc.index_title)
doc.end_paragraph()
doc.start_table('index', 'IDX-Table')
for key in sorted(index.keys()):
doc.start_row()
doc.start_cell('IDX-Cell')
doc.start_paragraph('IDX-Entry')
doc.write_text(key)
doc.end_paragraph()
doc.end_cell()
doc.start_cell('IDX-Cell')
doc.start_paragraph('IDX-Entry')
pages = [str(page_nr) for page_nr in index[key]]
doc.write_text(', '.join(pages))
doc.end_paragraph()
doc.end_cell()
doc.end_row()
doc.end_table()
#------------------------------------------------------------------------
#
# PdfDoc class
#
#------------------------------------------------------------------------
class PdfDoc(CairoDocgen):
"""Render the document into PDF file using Cairo.
"""
def create_cairo_surface(self, fobj, width_in_points, height_in_points):
return cairo.PDFSurface(fobj, width_in_points, height_in_points)
#------------------------------------------------------------------------
#
# PsDoc class
#
#------------------------------------------------------------------------
class PsDoc(CairoDocgen):
"""Render the document into PS file using Cairo.
"""
EXT = 'ps'
def create_cairo_surface(self, fobj, width_in_points, height_in_points):
return cairo.PSSurface(fobj, width_in_points, height_in_points)
| beernarrd/gramps | gramps/plugins/docgen/cairodoc.py | Python | gpl-2.0 | 11,747 | [
"Brian"
] | a7010fb1020215914a728457b7c03d8d98f3ec082cb0119eec6d1cdba2527155 |
#!/usr/bin/python
#Icefilms.info v1.1.0 - Eldorado
#All code Copyleft (GNU GPL v2) Eldorado and icefilms-xbmc team
############### Imports ############################
#standard module imports
import sys,os
import time,re
import urllib,urllib2,base64
import random
import copy
import threading
import string
import traceback
reload(sys)
sys.setdefaultencoding('utf8')
############ Set prepare_zip to True in order to scrape the entire site to create a new meta pack ############
'''
Setting to true will also enable a new menu option 'Create Meta Pack' which will scrape all categories and download covers & backdrops
'''
#prepare_zip = True
prepare_zip = False
##############################################################################################################
import xbmc, xbmcplugin, xbmcgui, xbmcvfs, datetime
''' Use addon common library for http calls '''
try:
from addon.common.net import Net
from addon.common.addon import Addon
except Exception, e:
xbmc.log('Failed to import script.module.addon.common: %s' % e)
xbmcgui.Dialog().ok("Icefilms Import Failure", "Failed to import addon.common", "A component needed by Icefilms is missing on your system", "Please visit www.tvaddons.ag for support")
net = Net()
addon_id = 'plugin.video.icefilms'
addon = Addon(addon_id, sys.argv)
datapath = addon.get_profile()
try:
from metahandler import metahandlers
metahandler_version = metahandlers.common.addon.get_version()
except Exception, e:
addon.log_error('Failed to import script.module.metahandler: %s' % e)
xbmcgui.Dialog().ok("Icefilms Import Failure", "Failed to import Metahandlers", "A component needed by Icefilms is missing on your system", "Please visit www.tvaddons.ag for support")
########################### Queries ############################
url = addon.queries.get('url', '')
name = addon.queries.get('name', '')
imdbnum = addon.queries.get('imdbnum', '')
tmdbnum = addon.queries.get('tmdbnum', '')
mode = addon.queries.get('mode', '')
dirmode = addon.queries.get('dirmode', '')
season_num = addon.queries.get('season', '')
episode_num = addon.queries.get('episode', '')
video_type = addon.queries.get('videoType', '')
video_url = addon.queries.get('videoUrl', '')
stacked_parts = addon.queries.get('stackedParts', '')
nextPage = addon.queries.get('nextPage', '')
search = addon.queries.get('search', '')
video_id = addon.queries.get('t', '')
addon.log_debug('----------------Icefilms Addon Param Info----------------------')
addon.log_debug('--- Version: ' + str(addon.get_version()))
addon.log_debug('--- Mode: ' + str(mode))
addon.log_debug('--- DirMode: ' + str(dirmode))
addon.log_debug('--- URL: ' + str(url))
addon.log_debug('--- Video Id: ' + str(video_id))
addon.log_debug('--- Video Type: ' + str(video_type))
addon.log_debug('--- Video URL: ' + str(video_url))
addon.log_debug('--- Name: ' + str(name))
addon.log_debug('--- IMDB: ' + str(imdbnum))
addon.log_debug('--- TMDB: ' + str(tmdbnum))
addon.log_debug('--- Season: ' + str(season_num))
addon.log_debug('--- Episode: ' + str(episode_num))
addon.log_debug('--- MyHandle: ' + str(sys.argv[1]))
addon.log_debug('---------------------------------------------------------------')
################################################################
#get path to me
icepath = addon.get_path()
#append lib directory
sys.path.append( os.path.join( icepath, 'resources', 'lib' ) )
#imports of things bundled in the addon
import container_urls,clean_dirs,htmlcleaner
import debridroutines
#Database utilities
from db_utils import DB_Connection
db_connection = DB_Connection(addon)
from cleaners import *
from BeautifulSoup import BeautifulSoup
from xgoogle.search import GoogleSearch
#Common Cache
# plugin constants
dbg = False # Set to false if you don't want debugging
#Common Cache
try:
import StorageServer
except:
import storageserverdummy as StorageServer
cache = StorageServer.StorageServer(addon_id)
####################################################
############## Constants / Variables ###############
# global constants
ICEFILMS_URL = addon.get_setting('icefilms-url')
if not ICEFILMS_URL.endswith("/"):
ICEFILMS_URL = ICEFILMS_URL + "/"
ICEFILMS_AJAX = ICEFILMS_URL+'membersonly/components/com_iceplayer/video.phpAjaxResp.php?s=%s&t=%s&app_id=if_' + addon.get_version()
ICEFILMS_AJAX_REFER = ICEFILMS_URL + 'membersonly/components/com_iceplayer/video.php?h=374&w=631&vid=%s&img='
ICEFILMS_REFERRER = ICEFILMS_URL
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36'
ACCEPT = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
VideoType_Movies = 'movie'
VideoType_TV = 'tvshow'
VideoType_Season = 'season'
VideoType_Episode = 'episode'
#useful global strings:
iceurl = ICEFILMS_URL
meta_setting = addon.get_setting('use-meta')
downloadPath = addon.get_setting('download-folder')
#Auto-watch
currentTime = 1
totalTime = 0
callEndOfDirectory = True
#Variable for multi-part
finalPart = True
#Paths Etc
cookie_path = os.path.join(datapath, 'cookies')
downinfopath = os.path.join(datapath, 'downloadinfologs')
cookie_jar = os.path.join(cookie_path, 'cookiejar.lwp')
ice_cookie = os.path.join(cookie_path, 'icefilms.lwp')
art_path = os.path.join(icepath, 'resources', 'art')
####################################################
def xbmcpath(path,filename):
translatedpath = os.path.join(xbmc.translatePath( path ), ''+filename+'')
return translatedpath
def Notify(typeq,title,message,times, line2='', line3=''):
#simplified way to call notifications. common notifications here.
msgList = [message, line2, line3]
if title == '':
title='Icefilms Notification'
if typeq == 'small' or typeq == 'Download Alert':
if times == '':
times='5000'
smallicon=handle_file('smallicon')
addon.show_small_popup(title=title, msg=message, delay=int(times), image=smallicon)
elif typeq == 'big':
addon.show_ok_dialog(msgList, title=title, is_error=False)
else:
addon.show_ok_dialog(msgList, title=title, is_error=False)
def handle_file(filename,getmode=''):
#bad python code to add a get file routine.
if filename == 'smallicon':
return_file = xbmcpath(art_path,'smalltransparent2.png')
elif filename == 'icon':
return_file = xbmcpath(icepath, 'icon.png')
elif filename == 'homepage':
return_file = xbmcpath(art_path,'homepage.png')
elif filename == 'movies':
return_file = xbmcpath(art_path,'movies.png')
elif filename == 'music':
return_file = xbmcpath(art_path,'music.png')
elif filename == 'tvshows':
return_file = xbmcpath(art_path,'tvshows.png')
elif filename == 'movies_fav':
return_file = xbmcpath(art_path,'movies_fav.png')
elif filename == 'tvshows_fav':
return_file = xbmcpath(art_path,'tvshows_fav.png')
elif filename == 'other':
return_file = xbmcpath(art_path,'other.png')
elif filename == 'search':
return_file = xbmcpath(art_path,'search.png')
elif filename == 'standup':
return_file = xbmcpath(art_path,'standup.png')
elif filename == 'localpic':
return_file = xbmcpath(art_path,'local_file.jpg')
if getmode == '':
return return_file
if getmode == 'open':
try:
opened_return_file=openfile(return_file)
return opened_return_file
except:
addon.log_debug('opening failed')
def openfile(filename):
f = xbmcvfs.File(filename)
contents = f.read()
f.close()
return contents
def save(filename,contents):
f = xbmcvfs.File(filename, 'w')
f.write(contents)
f.close()
def appendfile(filename,contents):
f = xbmcvfs.File(filename, 'a')
f.write(contents)
f.close()
def Startup_Routines():
    # avoid error on first run if the paths do not exist, by creating them
if not xbmcvfs.exists(datapath): xbmcvfs.mkdir(datapath)
if not xbmcvfs.exists(downinfopath): xbmcvfs.mkdir(downinfopath)
if not xbmcvfs.exists(cookie_path): xbmcvfs.mkdir(cookie_path)
# Run the startup routines for special download directory structure
DLDirStartup()
#Initialize cache DB
db_connection.init_database()
#Convert file system favourites to DB
convert_favourites()
# Run the login startup routines
LoginStartup()
# Run the container checking startup routines, if enable meta is set to true
if meta_setting=='true': ContainerStartup()
#Rescan Next Aired on startup - actually only rescans every 24hrs
next_aired = str2bool(addon.get_setting('next-aired'))
if next_aired:
xbmc.executebuiltin("RunScript(%s, silent=true)" % os.path.join(icepath, 'resources/script.tv.show.next.aired/default.py'))
#Upgrade code to convert legacy file system based favourites to cache db
def convert_favourites():
favpath=os.path.join(datapath, 'Favourites', '')
backup_favpath = os.path.join(datapath, 'Favourites_Backup', '')
moviefav=os.path.join(datapath, 'Favourites', 'Movies', '')
tvfav=os.path.join(datapath, 'Favourites', 'TV', '')
try:
if xbmcvfs.exists(favpath):
#Reset DB to start fresh
db_connection.reset_db()
#Process Movie favourites
if xbmcvfs.exists(moviefav):
moviedirs, moviefiles = xbmcvfs.listdir(moviefav)
if moviefiles:
for file in moviefiles:
filecontents = openfile(os.path.join(moviefav, file))
#split it into its component parts
info = favRead(filecontents)
new_url = parse_url(info[1])
db_connection.save_favourite('movie', info[0], new_url, info[3])
#if not xbmcvfs.delete(os.path.join(moviefav, file)):
# raise Exception('Favourite Convert - error deleting movie fav file: %s' % file)
#if not xbmcvfs.rmdir(moviefav):
# raise Exception('Favourite Convert - error deleting movie fav folder: %s' % moviefav)
#Process TV favourites
if xbmcvfs.exists(tvfav):
tvdirs, tvfiles = xbmcvfs.listdir(tvfav)
if tvfiles:
for file in tvfiles:
filecontents = openfile(os.path.join(tvfav, file))
#split it into its component parts
info = favRead(filecontents)
new_url = parse_url(info[1])
db_connection.save_favourite('tvshow', info[0], new_url, info[3])
#if not xbmcvfs.delete(os.path.join(tvfav, file)):
# raise Exception('Favourite Convert - error deleting tv show fav file: %s' % file)
#if not xbmcvfs.rmdir(tvfav):
# raise Exception('Favourite Convert - error deleting tv fav folder: %s' % tvfav)
#if not xbmcvfs.rmdir(favpath):
# raise Exception('Favourite Convert - error deleting favourite folder: %s' % favpath)
if not xbmcvfs.rename(favpath, backup_favpath):
raise Exception('Favourite Convert - error backing up favourites folder: %s' % favpath)
except db_connection.db.IntegrityError, e:
addon.log_error('Favourite Convert - Duplicate favourite attempted to be added: %s' % e)
Notify('small', 'Icefilms Favourites', 'Error occured converting favourites to cache DB', '')
except Exception, e:
addon.log_error('Favourite Convert - error during processing: %s' % e)
Notify('small', 'Icefilms Favourites', 'Error occured converting favourites to cache DB', '')
def parse_url(url):
#Re-do the URL in case user has changed base URL in addon settings
import urlparse
split_url = urlparse.urlsplit(url)
if split_url.path.startswith('/'):
part_url = split_url.path[1:]
else:
part_url = split_url.path
if split_url.query:
part_url = part_url + "?" + split_url.query
return part_url
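# Illustrative example (the URL shown is an assumption, not taken from the site):
# parse_url('http://www.icefilms.info/tv/series/12345?x=1') -> 'tv/series/12345?x=1',
# i.e. the scheme and host are dropped so saved favourites follow the base-URL setting.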
def DLDirStartup():
# Startup routines for handling and creating special download directory structure
SpecialDirs=addon.get_setting('use-special-structure')
if SpecialDirs == 'true':
if downloadPath:
if xbmcvfs.exists(downloadPath):
tvpath=os.path.join(downloadPath, 'TV Shows', '')
moviepath=os.path.join(downloadPath, 'Movies', '')
#IF BASE DIRECTORY STRUCTURE DOESN'T EXIST, CREATE IT
if not xbmcvfs.exists(tvpath):
xbmcvfs.mkdir(tvpath)
if not xbmcvfs.exists(moviepath):
xbmcvfs.mkdir(moviepath)
else:
#IF DIRECTORIES EXIST, CLEAN DIRECTORY STRUCTURE (REMOVE EMPTY DIRECTORIES)
clean_dirs.do_clean(tvpath)
clean_dirs.do_clean(moviepath)
def LoginStartup():
#Get whether user has set an account to use.
debrid_account = str2bool(addon.get_setting('realdebrid-account'))
movreel_account = str2bool(addon.get_setting('movreel-account'))
HideSuccessfulLogin = str2bool(addon.get_setting('hide-successful-login-messages'))
    #Verify Real-Debrid Account
if debrid_account:
if not addon.get_setting('realdebrid_token'):
try:
rd = debridroutines.RealDebrid()
rd.authorize_resolver()
except Exception, e:
addon.log_error('**** Real-Debrid Error: %s' % e)
Notify('big','Real-Debrid Login Failed','Failed to connect with Real-Debrid.', '', '', 'Please check your internet connection.')
pass
def ContainerStartup():
#Check for previous Icefilms metadata install and delete
meta_folder = os.path.join(datapath, 'meta_caches', '')
if xbmcvfs.exists(meta_folder):
import shutil
try:
addon.log_debug('Removing previous Icefilms meta folder: %s' % meta_folder)
xbmcvfs.rmdir(meta_folder)
except Exception, e:
addon.log_error('Failed to delete Icefilms meta folder: %s' % e)
pass
    #Initialize MetaHandler class
mh=metahandlers.MetaData()
#Check meta cache DB if meta pack has been installed
meta_installed = mh.check_meta_installed(addon_id)
#get containers dict from container_urls.py
containers = container_urls.get()
local_install = False
if addon.get_setting('meta_pack_location_option') == 'Custom':
local_install = True
meta_pack_locaton = addon.get_setting('meta_folder_location')
if not meta_pack_locaton.endswith("/"):
meta_pack_locaton = meta_pack_locaton + "/"
else:
meta_pack_locaton = containers['url_2shared']
if not meta_installed:
#Offer to download the metadata DB
dialog = xbmcgui.Dialog()
        ret = dialog.yesno('Download Meta Containers '+str(containers['date'])+' ?', 'There is a metadata container available.','Install it to get meta information for videos.', 'Would you like to get it? It\'s a small '+str(containers['db_size'])+'MB download.','Remind me later', 'Install')
if ret==True:
#MetaContainer will clean up from previous installs, so good idea to always initialize at addon startup
from metahandler import metacontainers
mc = metacontainers.MetaContainer()
work_path = mc.work_path
#download dem files
get_db_zip=Zip_DL_and_Install(meta_pack_locaton, containers['db_filename'], 'database', work_path, mc, local_install)
#do nice notification
if get_db_zip==True:
Notify('small','Metacontainer DB Installation Success','','')
#Update meta addons table to indicate meta pack was installed with covers
mh.insert_meta_installed(addon_id, last_update=containers['date'])
#Re-check meta_installed
meta_installed = mh.check_meta_installed(addon_id)
else:
Notify('small','Metacontainer DB Installation Failure','','')
def Zip_DL_and_Install(url, filename, installtype, work_folder, mc, local_install=False):
complete = False
if local_install:
#Define local path where zip already exists
filepath=os.path.normpath(os.path.join(url, filename, ''))
complete = True
else:
#define the path to save it to
filepath=os.path.normpath(os.path.join(work_folder, filename, ''))
#link = url + filename
#2Shared download
import resolvers
link = resolvers.SHARED2_HANDLER(url)
filepath_exists=xbmcvfs.exists(filepath)
#if zip does not already exist, download from url, with nice display name.
if not filepath_exists:
addon.log_debug('Downloading zip: %s' % link)
try:
complete = Download(link, filepath, installtype)
except Exception, e:
addon.log_error('******* ERROR - Download Pack Failed: %s' % e)
Notify('big','Error Downloading Meta Pack', '%s' % e, '')
pass
else:
addon.log_debug('zip already downloaded, attempting extraction')
#Run zip install
if complete:
addon.log_debug('*** Handling meta install')
return mc.install_metadata_container(filepath, installtype)
else:
return False
def create_meta_pack():
# This function will scrape all A-Z categories of the entire site
#Insert starting record to addon table so that all data and images are scraped/downloaded
mh=metahandlers.MetaData()
mh.insert_meta_installed(addon_id, last_update='Now', movie_covers='true', tv_covers='true', tv_banners='true', movie_backdrops='true', tv_backdrops='true')
A2Z=[chr(i) for i in xrange(ord('A'), ord('Z')+1)]
addon.log('### GETTING MOVIE METADATA FOR ALL *MUSIC* ENTRIES')
MOVIEINDEX(iceurl + 'music/a-z/1')
addon.log('### GETTING MOVIE METADATA FOR ALL *STANDUP* ENTRIES')
MOVIEINDEX(iceurl + 'standup/a-z/1')
addon.log('### GETTING MOVIE METADATA FOR ALL *OTHER* ENTRIES')
MOVIEINDEX(iceurl + 'other/a-z/1')
addon.log('### GETTING MOVIE METADATA FOR ALL ENTRIES ON: '+'1')
MOVIEINDEX(iceurl + 'movies/a-z/1')
for theletter in A2Z:
addon.log('### GETTING MOVIE METADATA FOR ALL ENTRIES ON: '+theletter)
MOVIEINDEX(iceurl + 'movies/a-z/' + theletter)
addon.log('### GETTING TV METADATA FOR ALL ENTRIES ON: '+'1')
TVINDEX(iceurl + 'tv/a-z/1')
for theletter in A2Z:
addon.log('### GETTING TV METADATA FOR ALL ENTRIES ON: '+theletter)
TVINDEX(iceurl + 'tv/a-z/' + theletter)
#Ensure to reset addon fields to false so database is ready to deploy
mh.update_meta_installed(addon_id, movie_covers='false', tv_covers='false', tv_banners='false', movie_backdrops='false', tv_backdrops='false')
def CATEGORIES(): # (homescreen of addon)
#run startup stuff
Startup_Routines()
#get necessary paths
homepage=handle_file('homepage','')
tvshows=handle_file('tvshows','')
movies=handle_file('movies','')
music=handle_file('music','')
standup=handle_file('standup','')
other=handle_file('other','')
search=handle_file('search','')
#add directories
addDir('Favourites', iceurl, 57, os.path.join(art_path, 'favourites.png'))
addDir('Watch Queue', '', 'watch_queue', os.path.join(art_path,'favourites.png'))
addDir('TV Shows', iceurl+'tv/a-z/1',50,tvshows)
addDir('Movies', iceurl+'movies/a-z/1',51,movies)
addDir('Music', iceurl+'music/a-z/1',52,music)
addDir('Stand Up Comedy', iceurl+'standup/a-z/1',53,standup)
addDir('Other', iceurl+'other/a-z/1',54,other)
# addDir('Recently Added Movies', iceurl+'index',60,os.path.join(art_path,'recently added.png'))
# addDir('Latest Releases', iceurl+'index',61,os.path.join(art_path,'latest releases.png'))
# addDir('Being Watched Now', iceurl+'index',62,os.path.join(art_path,'being watched now.png'))
if str2bool(addon.get_setting('recent-watched')):
addDir('Recently Watched', '', 'recent_watched', os.path.join(art_path,'being watched now.png'))
addDir('Search',iceurl,55,search)
VaddDir('Help', '', 'addon_help', '')
#Only show if prepare_zip = True - meaning you are creating a meta pack
if prepare_zip:
addDir('Create Meta Pack',iceurl,666,'')
def sort_list(list):
#create new empty list
stringList = []
for item in list:
stringList.append('|'.join(item[1:]))
#sort list alphabetically and return it.
tupleList = [(x.lower(), x) for x in stringList]
articles = ("a","an","the")
tupleList.sort(key=lambda s: tuple(word for word in s[1].split() if word.lower() not in articles))
return [x[1] for x in tupleList]
def favRead(string):
try:
splitter=re.split('\|+', string)
name=splitter[0]
url=splitter[1]
mode=int(splitter[2])
try:
imdb_id=str(splitter[3])
except:
imdb_id=''
except:
return None
else:
return name,url,mode,imdb_id
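# Legacy favourite records are pipe-delimited as "name|url|mode|imdb_id"; for example
# (illustrative values) favRead('Some Show|tv/series/1|12|tt0000000') returns
# ('Some Show', 'tv/series/1', 12, 'tt0000000').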
def FAVOURITES(url):
#get necessary paths
tvshows=handle_file('tvshows_fav','')
movies=handle_file('movies_fav','')
addDir('TV Shows',iceurl,570,tvshows)
addDir('Movies',iceurl,571,movies)
def METAFIXER(url, videoType):
#Icefilms urls passed to me will have their proper names and imdb numbers returned.
source=GetURL(url)
#get proper name from the page. (in case it is a weird name)
if videoType==VideoType_Movies:
#get imdb number.
match=re.compile('<a class=iframe href=http://www.imdb.com/title/(.+?)/ ').findall(source)
#check if it is an episode.
epcheck=re.search('<a href=/tv/series/',source)
#if it is, return the proper series name as opposed to the mirror page name.
if epcheck is not None:
tvget=re.compile('<a href=/tv/series/(.+?)>').findall(source)
tvurl=iceurl+'tv/series/'+str(tvget[0])
#load ep page and get name from that. sorry icefilms bandwidth!
tvsource=GetURL(tvurl)
name=re.compile('<h1>(.+?)<a class').findall(tvsource)
#return mirror page name.
if epcheck is None:
name=re.compile('''<span style="font-size:large;color:white;">(.+?)</span>''').findall(source)
name=CLEANUP(name[0])
return name,match[0]
elif videoType==VideoType_TV:
#TV
name=re.compile('<h1>(.+?)<a class').findall(source)
match=re.compile('href="http://www.imdb.com/title/(.+?)/"').findall(source)
name=CLEANUP(name[0])
return name,match[0]
def ADD_TO_FAVOURITES(name, url, imdbnum, videoType):
addon.log_debug('Adding to favourites: name: %s, imdbnum: %s, url: %s' % (name, imdbnum, url))
try:
if name and url:
#fix name and imdb number for Episode List entries in Search.
if imdbnum == 'nothing':
metafix=METAFIXER(url, videoType)
name=metafix[0]
imdbnum=metafix[1]
addon.log_debug('NAME: %s URL: %s IMDB NUMBER: %s' % (name,url,imdbnum))
#Delete HD entry from filename. using name as filename makes favourites appear alphabetically.
adjustedname=Clean_Windows_String(name).strip()
new_url = parse_url(url)
db_connection.save_favourite(videoType, name, new_url, imdbnum)
Notify('small','Icefilms Favourites', name + ' added to favourites','','6000')
#Rescan Next Aired
next_aired = str2bool(addon.get_setting('next-aired'))
if next_aired:
xbmc.executebuiltin("RunScript(%s, silent=true)" % os.path.join(icepath, 'resources/script.tv.show.next.aired/default.py'))
else:
raise Exception('Unable to add favourite due to blank name or url')
except db_connection.db.IntegrityError, e:
addon.log_error('Favourite already exists: %s' % name)
Notify('small','Icefilms Favourites', '%s favourite already exists' % name,'','6000')
except Exception, e:
addon.log_error('Error adding favourite: %s' % e)
Notify('small','Icefilms Favourites', 'Unable to add to favourites','','')
def DELETE_FROM_FAVOURITES(url):
addon.log_debug('Deleting from favourites: url: %s' % url)
try:
new_url = parse_url(url)
db_connection.delete_favourite(new_url)
xbmc.executebuiltin("XBMC.Container.Refresh")
except Exception, e:
addon.log_error('Error deleting favourite: %s' % e)
Notify('small','Icefilms Favourites', 'Error while attempting to delete favourite','','')
def CLEAR_FAVOURITES(url):
dialog = xbmcgui.Dialog()
ret = dialog.yesno('WARNING!', 'Delete all your favourites?','','','Cancel','Go Nuclear')
if ret==True:
db_connection.clear_favourites()
xbmc.executebuiltin("XBMC.Container.Refresh")
def getFavourites(videoType):
fav_list = db_connection.get_favourites(videoType)
new_fav_list = sort_list(fav_list)
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
else:
meta_installed = False
    if videoType == VideoType_TV:
        mode = 12
    elif videoType == VideoType_Season:
        mode = 13
    elif videoType == VideoType_Episode:
        mode = 14
    elif videoType == VideoType_Movies:
        mode = 100
#for each string
for fav_string in new_fav_list:
fav = fav_string.split('|')
new_url = iceurl + fav[1]
if meta_setting=='true' and meta_installed:
#return the metadata dictionary
if fav[2] is not None:
#return the metadata dictionary
meta=metaget.get_meta(videoType, fav[0], imdb_id=fav[2])
if meta is None:
#add all the items without meta
addDir(fav[0], new_url, mode, '',delfromfav=True, totalItems=len(new_fav_list), favourite=True)
else:
#add directories with meta
addDir(fav[0], new_url, mode, '', meta=meta, delfromfav=True, imdb=fav[2], totalItems=len(new_fav_list), meta_install=meta_installed, favourite=True)
else:
#add all the items without meta
addDir(fav[0], new_url, mode, '', delfromfav=True, totalItems=len(new_fav_list), favourite=True)
else:
#add all the items without meta
addDir(fav[0], new_url, mode, '', delfromfav=True, totalItems=len(new_fav_list), favourite=True)
if videoType == VideoType_TV:
setView('tvshows', 'tvshows-view')
elif videoType == VideoType_Movies:
setView('movies', 'movies-view')
def check_episode(name):
#Episode will have eg. 01x15 within the name, else we can assume it's a movie
if re.search('([0-9]+x[0-9]+)', name):
return True
else:
return False
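#get_video_name: split a listing title into a {'name', 'year'} dict. For
#episodes (names containing e.g. '01x15') the text between the episode number
#and the (year) is used as the name; for movies the 'Title (Year)' pattern is
#used. If neither pattern matches, the raw name is returned with an empty year.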
def get_video_name(name):
video = {}
if check_episode(name):
r = re.search('[0-9]+x[0-9]+ (.+?) [(]([0-9]{4})[)]', name)
else:
r = re.search('(.+?) [(]([0-9]{4})[)]',name)
if r:
video['name'] = r.group(1)
video['year'] = r.group(2)
else:
video['name'] = name
video['year'] = ''
return video
def check_video_meta(name, metaget):
#Determine if it's a movie or tvshow by the title returned - tv show will contain eg. 01x15 to signal season/episode number
episode = check_episode(name)
if episode:
episode_info = re.search('([0-9]+)x([0-9]+)', name)
season = int(episode_info.group(1))
episode = int(episode_info.group(2))
#Grab episode title, check for regex on it both ways
episode_title = re.search('(.+?) [0-9]+x[0-9]+', name)
if not episode_title:
episode_title = re.search('[0-9]+x[0-9]+ (.+)', name)
episode_title = episode_title.group(1)
tv_meta = metaget.get_meta('tvshow',episode_title)
meta=metaget.get_episode_meta(episode_title, tv_meta['imdb_id'], season, episode)
else:
r=re.search('(.+?) [(]([0-9]{4})[)]',name)
if r:
name = r.group(1)
year = r.group(2)
else:
year = ''
meta = metaget.get_meta('movie',name, year=year)
return meta
# Quick helper method to check and add listing tag folders - popularity, recently added etc.
def folder_tags(folder_text):
hide_tags = str2bool(addon.get_setting('hide-tags'))
if not hide_tags:
VaddDir(folder_text, '', 0, '', False)
def RECENT(url):
html = GetURL(url)
#initialise meta class before loop
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
else:
meta_installed = False
recent_movies = re.search('<h2>Recently Added Movies</h2>(.+?)</div>', html, re.DOTALL)
if recent_movies:
text = re.compile("<span style='font-size:14px;'>(.+?)<li>").findall(recent_movies.group(1))
#Add the first line
folder_tags('[COLOR blue]' + text[0] + '[/COLOR]')
mirlinks=re.compile('<a href=/(.+?)>(.+?)</a>[ ]*<(.+?)>').findall(recent_movies.group(1))
for url,name,hd in mirlinks:
url=iceurl+url
name=CLEANUP(name)
#Check if it's an HD source and add a tag to the name
if re.search('color:red', hd):
new_name = name + ' [COLOR red]*HD*[/COLOR]'
else:
new_name = name
if meta_installed and meta_setting=='true':
meta = check_video_meta(name, metaget)
addDir(new_name,url,100,'',meta=meta,disablefav=True, disablewatch=True, meta_install=meta_installed)
else:
addDir(new_name,url,100,'',disablefav=True, disablewatch=True)
setView('movies', 'movies-view')
def LATEST(url):
link=GetURL(url)
#initialise meta class before loop
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
else:
meta_installed = False
homepage=re.compile('<h1>Recently Added</h1>(.+?)<h1>Statistics</h1>', re.DOTALL).findall(link)
for scrape in homepage:
scrape='<h1>Recently Added</h1>'+scrape+'<h1>Statistics</h1>'
latrel=re.compile('<h1>Latest Releases</h1>(.+?)<h1>Being Watched Now</h1>', re.DOTALL).findall(scrape)
for scraped in latrel:
text = re.compile("<span style='font-size:14px;'>(.+?)<li>").findall(scraped)
#Add the first line
folder_tags('[COLOR blue]' + text[0] + '[/COLOR]')
mirlinks=re.compile('<a href=/(.+?)>(.+?)</a>[ ]*<(.+?)>').findall(scraped)
for url,name,hd in mirlinks:
url=iceurl+url
name=CLEANUP(name)
if check_episode(name):
mode = 14
else:
mode = 100
#Check if it's an HD source and add a tag to the name
if re.search('color:red', hd):
new_name = name + ' [COLOR red]*HD*[/COLOR]'
else:
new_name = name
if meta_installed and meta_setting=='true':
meta = check_video_meta(name, metaget)
addDir(new_name,url,mode,'',meta=meta,disablefav=True, disablewatch=True, meta_install=meta_installed)
else:
addDir(new_name,url,mode,'',disablefav=True, disablewatch=True)
setView(None, 'default-view')
def WATCHINGNOW(url):
link=GetURL(url)
#initialise meta class before loop
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
else:
meta_installed = False
homepage=re.compile('<h1>Recently Added</h1>(.+?)<h1>Statistics</h1>', re.DOTALL).findall(link)
for scrape in homepage:
scrapy='<h1>Recently Added</h1>'+scrape+'<h1>Statistics</h1>'
watnow=re.compile('<h1>Being Watched Now</h1>(.+?)<h1>Statistics</h1>', re.DOTALL).findall(scrapy)
for scraped in watnow:
mirlinks=re.compile('href=/(.+?)>(.+?)</a>[ ]*<(.+?)>').findall(scraped)
for url,name,hd in mirlinks:
url=iceurl+url
name=CLEANUP(name)
if check_episode(name):
mode = 14
else:
mode = 100
#Check if it's an HD source and add a tag to the name
if re.search('color:red', hd):
new_name = name + ' [COLOR red]*HD*[/COLOR]'
else:
new_name = name
if meta_installed and meta_setting=='true':
meta = check_video_meta(name, metaget)
addDir(new_name,url,mode,'',meta=meta,disablefav=True, disablewatch=True, meta_install=meta_installed)
else:
addDir(new_name,url,mode,'',disablefav=True, disablewatch=True)
setView(None, 'default-view')
def recently_watched():
addDir('Movies', '', '572', '', disablewatch=True)
addDir('TV Episodes', '', '573','', disablewatch=True)
VaddDir('[COLOR red]** Clear All Lists[/COLOR]', '', 'clear_watched', '')
def get_recent_watched(videoType):
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
else:
meta_installed = False
    if videoType == VideoType_TV:
        mode = 12
    elif videoType == VideoType_Season:
        mode = 13
    elif videoType == VideoType_Episode:
        mode = 14
    elif videoType == VideoType_Movies:
        mode = 100
watch_list = db_connection.get_watched(videoType)
#for each string
for watch in watch_list:
if watch[8] > 0:
new_name = '[COLOR blue][' + format_time(watch[8]) + '][/COLOR] - ' + watch[2] + ' [' + watch[3] + ']'
else:
new_name = watch[2] + ' [' + watch[3] + ']'
new_url = iceurl + watch[0]
if meta_setting=='true' and meta_installed:
#return the metadata dictionary
if watch[4] is not None:
#return the metadata dictionary
if videoType == VideoType_Movies or videoType == VideoType_TV:
meta=metaget.get_meta(videoType, watch[2], imdb_id=watch[4])
elif videoType == VideoType_Episode:
meta=metaget.get_episode_meta('', watch[6], watch[4], watch[5], episode_title=watch[2])
if meta is None:
#add all the items without meta
addDir(new_name, new_url, mode, '', totalItems=len(watch_list), recentWatched=True)
else:
#add directories with meta
addDir(new_name, new_url, mode, '', meta=meta, imdb=watch[4], totalItems=len(watch_list), meta_install=meta_installed, recentWatched=True)
else:
#add all the items without meta
addDir(new_name, new_url, mode, '', totalItems=len(watch_list), recentWatched=True)
else:
#add all the items without meta
addDir(new_name, new_url, mode, '', totalItems=len(watch_list), recentWatched=True)
if len(watch_list) > 0:
if videoType == VideoType_TV:
VaddDir('[COLOR red]** Clear List[/COLOR]', '', 'clear_tv_watched', '')
elif videoType == VideoType_Movies:
VaddDir('[COLOR red]** Clear List[/COLOR]', '', 'clear_movie_watched', '')
elif videoType == VideoType_Episode:
VaddDir('[COLOR red]** Clear List[/COLOR]', '', 'clear_episode_watched', '')
if videoType == VideoType_TV:
setView('tvshows', 'tvshows-view')
elif videoType == VideoType_Movies:
setView('movies', 'movies-view')
elif videoType == VideoType_Episode:
setView('episodes', 'episodes-view')
def clear_watched(videoType=None):
dialog = xbmcgui.Dialog()
if videoType:
ret = dialog.yesno('Delete Watched List?', 'Do you wish to delete the current watched list?', '','This cannot be undone!')
else:
ret = dialog.yesno('Delete Watched Lists?', 'Do you wish to delete all of your watched lists?', '','This cannot be undone!')
if ret == True:
addon.log_debug('Clearing watched list for: %s' % videoType)
db_connection.flush_watched(videoType)
xbmc.executebuiltin("XBMC.Container.Refresh")
def remove_watched():
addon.log_debug('Removing item from watched list: %s' % url)
db_connection.clear_watched(parse_url(url))
xbmc.executebuiltin("XBMC.Container.Refresh")
def watch_queue():
addDir('Movies', '', '574', '', disablewatch=True)
addDir('TV Episodes', '', '575','', disablewatch=True)
VaddDir('[COLOR red]** Clear All Lists[/COLOR]', '', 'clear_queue', '')
def clear_queue(videoType=None):
dialog = xbmcgui.Dialog()
if videoType:
ret = dialog.yesno('Delete Queue List?', 'Do you wish to delete the current queue list?', '','This cannot be undone!')
else:
ret = dialog.yesno('Delete Queue Lists?', 'Do you wish to delete all of your queue lists?', '','This cannot be undone!')
if ret == True:
addon.log_debug('Clearing queue list for: %s' % videoType)
db_connection.flush_queue(videoType)
xbmc.executebuiltin("XBMC.Container.Refresh")
def get_queue_list(videoType):
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
else:
meta_installed = False
    if videoType == VideoType_TV:
        mode = 12
    elif videoType == VideoType_Season:
        mode = 13
    elif videoType == VideoType_Episode:
        mode = 14
    elif videoType == VideoType_Movies:
        mode = 100
queue_list = db_connection.get_queue(videoType)
#for each string
for queue in queue_list:
if queue[8] > 0:
new_name = '[COLOR blue][' + format_time(queue[8]) + '][/COLOR] - ' + queue[2] + ' [' + queue[3] + ']'
else:
new_name = queue[2] + ' [' + queue[3] + ']'
new_url = iceurl + queue[0]
if meta_setting=='true' and meta_installed:
#return the metadata dictionary
if queue[4] is not None:
#return the metadata dictionary
if videoType == VideoType_Movies or videoType == VideoType_TV:
meta=metaget.get_meta(videoType, queue[2], imdb_id=queue[4])
elif videoType == VideoType_Episode:
meta=metaget.get_episode_meta('', queue[6], queue[4], queue[5], episode_title=queue[2])
if meta is None:
#add all the items without meta
addDir(new_name, new_url, mode, '', totalItems=len(queue_list), queueList=True)
else:
#add directories with meta
addDir(new_name, new_url, mode, '', meta=meta, imdb=queue[4], totalItems=len(queue_list), meta_install=meta_installed, queueList=True)
else:
#add all the items without meta
addDir(new_name, new_url, mode, '', totalItems=len(queue_list), queueList=True)
else:
#add all the items without meta
addDir(new_name, new_url, mode, '', totalItems=len(queue_list), queueList=True)
if len(queue_list) > 0:
if videoType == VideoType_TV:
VaddDir('[COLOR red]** Clear List[/COLOR]', '', 'clear_tv_queue', '')
elif videoType == VideoType_Movies:
VaddDir('[COLOR red]** Clear List[/COLOR]', '', 'clear_movie_queue', '')
elif videoType == VideoType_Episode:
VaddDir('[COLOR red]** Clear List[/COLOR]', '', 'clear_episode_queue', '')
if videoType == VideoType_TV:
setView('tvshows', 'tvshows-view')
elif videoType == VideoType_Movies:
setView('movies', 'movies-view')
elif videoType == VideoType_Episode:
setView('episodes', 'episodes-view')
def remove_queue():
addon.log_debug('Removing item from queue list: %s' % url)
db_connection.clear_queue(parse_url(url))
xbmc.executebuiltin("XBMC.Container.Refresh")
def add_queue():
try:
addon.log_debug('Adding item to queue list: %s' % url)
video = get_video_name(name)
db_connection.save_queue(parse_url(url), video_type, video['name'], video['year'], season_num, episode_num, imdbnum)
Notify('small','Icefilms Watch Queue', name + ' added to Queue list','','6000')
except db_connection.db.IntegrityError, e:
addon.log_error('Queue item already exists: %s' % name)
Notify('small','Icefilms Watch Queue', '%s Queue item already exists' % name,'','6000')
except Exception, e:
addon.log_error('Error adding to Queue: %s' % e)
Notify('small','Icefilms Watch Queue', 'Unable to add Queue item','','')
def SEARCH(url):
SEARCHBYPAGE(url, 0)
def SEARCHBYPAGE(url, page):
kb = xbmc.Keyboard('', 'Search Icefilms.info', False)
kb.doModal()
if (kb.isConfirmed()):
search = kb.getText()
if search != '':
DoEpListSearch(search)
DoSearch(url, search, page)
setView('movies', 'movies-view')
def KnownSearch(search, url):
DoEpListSearch(search)
DoSearch(url, search, 0)
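#DoSearch: run a Google site-search over the Icefilms mirror pages, paging
#until at least 'search-results' (addon setting) unique hits are collected or
#a page adds nothing new, then hand them to find_meta_for_search_results.
#If more results remain, a ' Get More...' entry is appended that re-enters the
#search at the next page (mode 555).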
def DoSearch(iurl, search, nextPage):
finished = False
more = False
results = None
url = 'site:' + iurl + 'ip '+search+''
gs = GoogleSearch(url)
gs.results_per_page = 10
while not finished:
gs.page = nextPage
if (results == None):
results = gs.get_results()
else:
finished = True
local = gs.get_results()
for res in local:
if not FindSearchResult(res.title, results):
finished = False
results.append(res)
nextPage = nextPage + 1
results_per_page = int(addon.get_setting('search-results'))
if len(results) >= results_per_page:
more = True
finished = True
find_meta_for_search_results(results, 100)
if more:
#leading space ensures the menu item always appears at end of list regardless of current sort order
name = ' Get More...'
sysname = urllib.quote_plus(name)
sysurl = urllib.quote_plus(iurl)
icon = handle_file('search','')
liz = xbmcgui.ListItem(name, iconImage=icon, thumbnailImage=icon)
liz.setInfo(type="Video", infoLabels={"Title": name})
u = sys.argv[0] + "?url=" + sysurl + "&mode=" + str(555) + "&name=" + sysname + "&search=" + search + "&nextPage=" + str(nextPage)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
def FindSearchResult(name, results):
for res in results:
if res.title == name:
return True
return False
def DoEpListSearch(search):
tvurl = iceurl + 'tv/series'
# use urllib.quote_plus() on search instead of re.sub ?
searcher=urllib.quote_plus(search)
#searcher=re.sub(' ','+',search)
url='http://www.google.com/search?hl=en&q=site:'+tvurl+'+'+searcher+'&btnG=Search&aq=f&aqi=&aql=&oq='
link=GetURL(url)
match=re.compile('<h3 class="r"><a href="'+tvurl+'(.+?)"(.+?)">(.+?)</h3>').findall(link)
match = sorted(match, key=lambda result: result[2])
if len(match) == 0:
link = link.replace('<b>', '').replace('</b>', '')
match=re.compile('<h3 class="r"><a href="/url\?q='+tvurl+'(.+?)&(.+?)">(.+?)</h3>').findall(link)
find_meta_for_search_results(match, 12, search)
def TVCATEGORIES(url):
caturl = iceurl+'tv/'
setmode = '11'
addDir('A-Z Directories',caturl+'a-z/1',10,os.path.join(art_path,'az directories.png'))
ADDITIONALCATS(setmode,caturl)
if str2bool(addon.get_setting('recent-watched')):
addDir('Recently Watched', '', 'recent_watched_episode', os.path.join(art_path,'being watched now.png'))
addDir('Watch Queue', '', 'watch_queue_episode', os.path.join(art_path,'favourites.png'))
addDir('Favourites', iceurl, 570, os.path.join(art_path, 'favourites.png'))
setView(None, 'default-view')
def MOVIECATEGORIES(url):
caturl = iceurl+'movies/'
setmode = '2'
addDir('A-Z Directories',caturl+'a-z/1',1,os.path.join(art_path,'az directories.png'))
ADDITIONALCATS(setmode,caturl)
if str2bool(addon.get_setting('recent-watched')):
addDir('Recently Watched', '', 'recent_watched_movie', os.path.join(art_path,'being watched now.png'))
addDir('Watch Queue', '', 'watch_queue_movie', os.path.join(art_path,'favourites.png'))
addDir('Favourites', iceurl, 571, os.path.join(art_path, 'favourites.png'))
setView(None, 'default-view')
def MUSICCATEGORIES(url):
caturl = iceurl+'music/'
setmode = '2'
addDir('A-Z List',caturl+'a-z/1',setmode,os.path.join(art_path,'az lists.png'))
ADDITIONALCATS(setmode,caturl)
setView(None, 'default-view')
def STANDUPCATEGORIES(url):
caturl = iceurl+'standup/'
setmode = '2'
addDir('A-Z List',caturl+'a-z/1',setmode,os.path.join(art_path,'az lists.png'))
ADDITIONALCATS(setmode,caturl)
setView(None, 'default-view')
def OTHERCATEGORIES(url):
caturl = iceurl+'other/'
setmode = '2'
addDir('A-Z List',caturl+'a-z/1',setmode,os.path.join(art_path,'az lists.png'))
ADDITIONALCATS(setmode,caturl)
setView(None, 'default-view')
def ADDITIONALCATS(setmode,caturl):
if caturl == iceurl+'movies/':
addDir('HD 720p',caturl,63,os.path.join(art_path,'HD 720p.png'))
PopRatLat(setmode,caturl,'1')
addDir('Genres',caturl,64,os.path.join(art_path,'genres.png'))
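#PopRatLat: add the Popular / Highly Rated / Latest Releases / Recently Added
#sort directories for the given category URL and genre. TV URLs get the TV
#index mode (11) and everything else the generic index mode (2); the modeset
#argument passed in is not used.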
def PopRatLat(modeset,caturl,genre):
if caturl == iceurl+'tv/':
setmode = '11'
else:
setmode = '2'
addDir('Popular',caturl+'popular/'+genre,setmode,os.path.join(art_path,'popular.png'))
addDir('Highly Rated',caturl+'rating/'+genre,setmode,os.path.join(art_path,'highly rated.png'))
addDir('Latest Releases',caturl+'release/'+genre,setmode,os.path.join(art_path,'latest releases.png'))
addDir('Recently Added',caturl+'added/'+genre,setmode,os.path.join(art_path,'recently added.png'))
setView(None, 'default-view')
def HD720pCat(url):
PopRatLat('2',url,'hd')
setView(None, 'default-view')
def Genres(url):
addDir('Action',url,70,'')
addDir('Animation',url,71,'')
addDir('Comedy',url,72,'')
addDir('Documentary',url,73,'')
addDir('Drama',url,74,'')
addDir('Family',url,75,'')
addDir('Horror',url,76,'')
addDir('Romance',url,77,'')
addDir('Sci-Fi',url,78,'')
addDir('Thriller',url,79,'')
setView(None, 'default-view')
def Action(url):
PopRatLat('2',url,'action')
setView(None, 'default-view')
def Animation(url):
PopRatLat('2',url,'animation')
setView(None, 'default-view')
def Comedy(url):
PopRatLat('2',url,'comedy')
setView(None, 'default-view')
def Documentary(url):
PopRatLat('2',url,'documentary')
setView(None, 'default-view')
def Drama(url):
PopRatLat('2',url,'drama')
setView(None, 'default-view')
def Family(url):
PopRatLat('2',url,'family')
setView(None, 'default-view')
def Horror(url):
PopRatLat('2',url,'horror')
setView(None, 'default-view')
def Romance(url):
PopRatLat('2',url,'romance')
setView(None, 'default-view')
def SciFi(url):
PopRatLat('2',url,'sci-fi')
setView(None, 'default-view')
def Thriller(url):
PopRatLat('2',url,'thriller')
setView(None, 'default-view')
def MOVIEA2ZDirectories(url):
setmode = '2'
caturl = iceurl+'movies/a-z/'
#Generate A-Z list and add directories for all letters.
A2Z=[chr(i) for i in xrange(ord('A'), ord('Z')+1)]
#Add number directory
addDir ('#1234',caturl+'1',setmode,os.path.join(art_path,'letters','1.png'))
for theletter in A2Z:
addDir (theletter,caturl+theletter,setmode,os.path.join(art_path,'letters',theletter+'.png'))
setView(None, 'default-view')
def TVA2ZDirectories(url):
setmode = '11'
caturl = iceurl+'tv/a-z/'
#Generate A-Z list and add directories for all letters.
A2Z=[chr(i) for i in xrange(ord('A'), ord('Z')+1)]
#Add number directory
addDir ('#1234',caturl+'1',setmode,os.path.join(art_path,'letters','1.png'))
for theletter in A2Z:
addDir (theletter,caturl+theletter,setmode,os.path.join(art_path,'letters',theletter+'.png'))
setView(None, 'default-view')
def MOVIEINDEX(url):
#Indexer for most things. (Movies,Music,Stand-up etc)
link=GetURL(url)
# we do this to fix the problem when there is no imdb_id.
# I have found only one movie with this problem, but we must check this...
link = re.sub('<a name=i id=>','<a name=i id=None>',link)
#initialise meta class before loop
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
temp = re.compile('(<h3>|<a name=i id=.+?></a><img class=star><a href=)(.+?)(<div|</h3>|>(.+?)<br>)').findall(link)
for tag, link, longname, name in temp:
if tag == '<h3>':
folder_tags('[COLOR blue]' + link + '[/COLOR]')
else:
string = tag + link + longname + name
scrape=re.compile('<a name=i id=(.+?)></a><img class=star><a href=/(.+?)>(.+?)<br>').findall(string)
for imdb_id,url,name in scrape:
if meta_setting=='true':
ADD_ITEM(metaget,meta_installed,imdb_id,url,name,100, totalitems=len(temp))
                else:
                    #add without metadata -- imdb is still passed for use with Add to Favourites
                    name=CLEANUP(name)
                    addDir(name,iceurl+url,100,'',imdb='tt'+str(imdb_id), totalItems=len(scrape))
# Enable library mode & set the right view for the content
setView('movies', 'movies-view')
def TVINDEX(url):
#Indexer for TV Shows only.
link=GetURL(url)
#initialise meta class before loop
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
#list scraper now tries to get number of episodes on icefilms for show. this only works in A-Z.
#match=re.compile('<a name=i id=(.+?)></a><img class=star><a href=/(.+?)>(.+?)</a>').findall(link)
firstText = re.compile('<h3>(.+?)</h3>').findall(link)
if firstText:
if firstText[0].startswith('Rated'):
firstText[0] = string.split(firstText[0], '<')[0]
regex = '<h3>(.+?)<div'
else:
regex = '<h3>(.+?)</h3>'
folder_tags('[COLOR blue]' + firstText[0] + '[/COLOR]')
else:
regex = '<h3>(.+?)</h3>'
    scrape=re.search('<a name=i id=(.+?)></a><img class=star><a href=/(.+?)>(.+?)<br>', link)
    if scrape:
        if meta_setting=='true':
            ADD_ITEM(metaget,meta_installed,scrape.group(1),scrape.group(2),scrape.group(3),12, totalitems=1)
        else:
            addDir(scrape.group(3),iceurl + scrape.group(2),12,'',imdb='tt'+str(scrape.group(1)), totalItems=1)
    #Break the remaining source into separate lines and check if it contains a text entry
temp = re.compile('r>(.+?)<b').findall(link)
for entry in temp:
text = re.compile(regex).findall(entry)
if text:
folder_tags('[COLOR blue]' + text[0] + '[/COLOR]')
scrape=re.compile('<a name=i id=(.+?)></a><img class=star><a href=/(.+?)>(.+?)</a>').findall(entry)
if scrape:
for imdb_id,url,name in scrape:
if meta_setting=='true':
ADD_ITEM(metaget,meta_installed,imdb_id,url,name,12, totalitems=len(temp))
                    else:
                        #add without metadata -- imdb is still passed for use with Add to Favourites
                        name=CLEANUP(name)
                        addDir(name,iceurl+url,12,'',imdb='tt'+str(imdb_id), totalItems=len(scrape))
# Enable library mode & set the right view for the content
setView('tvshows', 'tvshows-view')
def TVSEASONS(url, imdb_id):
# displays by seasons. pays attention to settings.
FlattenSingleSeasons = addon.get_setting('flatten-single-season')
source=GetURL(url)
#Save the tv show name for use in special download directories.
match=re.compile('<h1>(.+?)<a class').findall(source)
cache.set('tvshowname',match[0])
r=re.search('(.+?) [(][0-9]{4}[)]',match[0])
if r:
showname = r.group(1)
else:
showname = match[0]
# get and save the TV Show poster link
try:
        imgcheck1 = re.search('<a class=img target=_blank href=', source)
        imgcheck2 = re.search('<iframe src=http://referer.us/f/\?url=', source)
        if imgcheck1 is not None:
            match4=re.compile('<a class=img target=_blank href=(.+?)>').findall(source)
            cache.set('poster',match4[0])
        if imgcheck2 is not None:
            match5=re.compile('<iframe src=http://referer.us/f/\?url=(.+?) width=').findall(source)
            cache.set('poster',match5[0])
except:
pass
ep_list = str(BeautifulSoup(source).find("span", { "class" : "list" } ))
showname = CLEANUP_FOR_META(showname)
season_list=re.compile('<h3><a name.+?></a>(.+?)<a.+?</a></h3>').findall(ep_list)
listlength=len(season_list)
if listlength > 0:
seasons = str(season_list)
season_nums = re.compile('Season ([0-9]{1,2}) ').findall(seasons)
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
if meta_installed:
season_meta = metaget.get_seasons(showname, imdb_id, season_nums)
else:
meta_installed = False
num = 0
for seasons in season_list:
        if FlattenSingleSeasons=='true' and listlength <= 1:
#proceed straight to adding episodes.
TVEPISODES(seasons.strip(),source=ep_list,imdb_id=''+str(imdb_id))
else:
#save episode page source code
cache.set('episodesrc',repr(ep_list))
#add season directories
if meta_installed and meta_setting=='true' and season_meta:
temp = season_meta[num]
addDir(seasons.strip(),'',13,temp['cover_url'],imdb=''+str(imdb_id), meta=season_meta[num], totalItems=len(season_list), meta_install=meta_installed)
num = num + 1
else:
addDir(seasons.strip(),'',13,'', imdb=''+str(imdb_id), totalItems=len(season_list))
setView('seasons', 'seasons-view')
def TVEPISODES(name,url=None,source=None,imdb_id=None):
#Save the season name for use in the special download directories.
cache.set('mediatvseasonname',name)
#If source wasn't passed to function, open the file it should be saved to.
if source is None:
source = eval(cache.get('episodesrc'))
    #special hack to deal with annoying re problems when receiving brackets ( )
if re.search('\(',name) is not None:
name = str((re.split('\(+', name))[0])
#name=str(name[0])
    #quick hack of source code to simplify scraping.
source=re.sub('</span>','<h3>',source)
#get all the source under season heading.
#Use .+?/h4> not .+?</h4> for The Daily Show et al to work.
match=re.compile('<h3><a name="[0-9]+?"></a>'+name+'.+?/h3>(.+?)<h3>').findall(source)
for seasonSRC in match:
addon.log_debug('Season Source is: %s' % name)
TVEPLINKS(seasonSRC, name, imdb_id)
setView('episodes', 'episodes-view')
def TVEPLINKS(source, season, imdb_id):
# displays all episodes in the source it is passed.
match=re.compile('<img class="star" /><a href="/(.+?)&">(.+?)</a>([<b>HD</b>]*)<br />').findall(source)
if meta_setting=='true':
#initialise meta class before loop
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
else:
metaget=False
meta_installed=False
for url, name, hd in match:
name = name + ' ' + hd
addon.log_debug("TVepLinks name: %s " % name)
get_episode(season, name, imdb_id, url, metaget, meta_installed, totalitems=len(match))
# Enable library mode & set the right view for the content
setView('episodes', 'episodes-view')
def LOADMIRRORS(url):
# This proceeds from the file page to the separate frame where the mirrors can be found,
# then executes code to scrape the mirrors
html=GetURL(url)
video_url = parse_url(url)
#---------------Begin phantom metadata getting--------
#Save metadata on page to files, for use when playing.
# Also used for creating the download directory structures.
ice_meta = {}
#Grab video name
namematch = re.search('''<span style="font-size:large;color:white;">(.+?)</span>''', html)
if not namematch:
        Notify('big','Error Loading Sources','An error occurred loading sources.\nCheck your connection and/or the Icefilms site.','')
        global callEndOfDirectory
        callEndOfDirectory = False
        return
else:
ice_meta['title'] = namematch.group(1)
year = re.search('\(([0-9]+)\)', namematch.group(1))
if year:
ice_meta['year'] = year.group(1)
try:
cache.set('videoname', namematch.group(1))
except:
addon.log_error("Failed to save video name")
pass
#If meta is enabled, we should have all needed info from previous screen so grab from list item that was clicked
if meta_setting=='true':
ice_meta['poster'] = xbmc.getInfoImage('ListItem.Thumb')
ice_meta['year'] = xbmc.getInfoLabel('ListItem.Year')
ice_meta['plot'] = xbmc.getInfoLabel('ListItem.Plot')
ice_meta['plot_outline'] = xbmc.getInfoLabel('ListItem.PlotOutline')
ice_meta['mpaa'] = xbmc.getInfoLabel('ListItem.Mpaa')
#Else we just use what we can grab from Icefilms site 'phantom' meta data
else:
#Set Plot
plot = re.search('<th>Description:</th><td>(.+?)<', html)
if plot:
ice_meta['plot'] = plot.group(1)
ice_meta['plot_outline'] = plot.group(1)
else:
ice_meta['plot'] = ''
ice_meta['plot_outline'] = ''
#Set Poster
imgcheck1 = re.search('<img width=250 src=(.+?) style', html)
if imgcheck1:
ice_meta['poster'] = imgcheck1.group(1)
imgcheck2 = re.search('<iframe src=/noref.php\?url=(.+?) width=', html)
if imgcheck2:
ice_meta['poster'] = imgcheck2.group(1)
#Set MPAA rating
mpaacheck=re.search('<th>MPAA Rating:</th><td>(.+?)</td>', html)
if mpaacheck:
            mpaa=re.sub('Rated ','', mpaacheck.group(1))
ice_meta['mpaa'] = mpaa
else:
ice_meta['mpaa'] = ''
########### get and save potential file path. This is for use in download function later on.
epcheck1 = re.search('Episodes</a>', html)
epcheck2 = re.search('Episode</a>', html)
if epcheck1 or epcheck2:
if cache.get('mediatvshowname'):
#open media file if it exists, as that has show name with date.
showname=cache.get('mediatvshowname')
else:
#fall back to scraping show name without date from the page.
addon.log_debug('USING FALLBACK SHOW NAME')
fallbackshowname=re.compile("alt\='Show series\: (.+?)'").findall(html)
showname=fallbackshowname[0]
try:
            #if the season name was cached (set by TVEPISODES)
            if cache.get('mediatvseasonname'):
                seasonname=cache.get('mediatvseasonname')
cache.set('mediapath','TV Shows/'+ Clean_Windows_String(showname) + '/' + Clean_Windows_String(seasonname))
else:
cache.set('mediapath','TV Shows/' + Clean_Windows_String(showname))
except:
addon.log_error("FAILED TO SAVE TV SHOW FILE PATH!")
else:
try:
cache.set('mediapath','Movies/' + Clean_Windows_String(namematch.group(1)))
except Exception, e:
addon.log_error('Failed to set cache value: %s' % e)
pass
#---------------End phantom metadata getting stuff --------------
match=re.compile('/membersonly/components/com_iceplayer/(.+?img=).*?" width=').findall(html)
match[0]=re.sub('%29',')',match[0])
match[0]=re.sub('%28','(',match[0])
for link in match:
mirrorpageurl = iceurl+'membersonly/components/com_iceplayer/' + link
html = GetURL(mirrorpageurl, save_cookie = True, use_cache=False)
#Show Ice Ad's
match = re.search('<iframe[^>]*src="([^"]+)', html)
if match:
show_ice_ad(urllib.quote(match.group(1)), mirrorpageurl)
#string for all text under hd720p border
defcat = re.compile('<div class=ripdiv><b>(.+?)</b>(.+?)</div>').findall(html)
for media_type, scrape in defcat:
if media_type == 'HD 720p+':
tag = ' | [COLOR red]HD[/COLOR]'
elif media_type == 'SD / DVD 480p':
tag = ' | [COLOR blue]DVD[/COLOR]'
elif media_type == 'DVD Screener':
tag = ' | [COLOR yellow]DVDSCR[/COLOR]'
elif media_type == 'R5/R6 DVDRip':
tag = ' | [COLOR green]R5/R6[/COLOR]'
else:
tag = ' | [COLOR white]Other[/COLOR]'
SOURCE(html, scrape, tag, ice_meta, video_url)
setView(None, 'default-view')
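#determine_source: look up a file host in host_list below. Each entry is a
#(domain, display name, resolver function name) tuple; the lookup is by domain
#when is_domain=True (a full URL is passed in) or by lower-cased display name
#otherwise. Returns the matching tuple, or None when the host is unknown.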
def determine_source(search_string, is_domain=False):
#Keep host list as global var - used to determine resolver and build/select auto play settings
host_list = [('180upload.com', '180Upload', 'resolve_180upload'),
('hugefiles.net', 'HugeFiles', 'resolve_hugefiles'),
('kingfiles.net', 'KingFiles', 'resolve_kingfiles'),
('clicknupload.com', 'ClicknUpload', 'resolve_clicknupload'),
('clicknupload.me', 'ClicknUpload', 'resolve_clicknupload'),
('upload.af', 'Upload', 'resolve_upload_af'),
('uploadx.org', 'UploadX', 'resolve_uploadx'),
('tusfiles.net', 'TusFiles', 'resolve_tusfiles'),
('xfileload.com', 'XfileLoad', 'resolve_xfileload'),
('mightyupload.com', 'MightyUpload', 'resolve_mightyupload'),
('donevideo.com', 'DoneVideo', 'resolve_donevideo'),
('vidplay.net', 'VidPlay', 'resolve_vidplay'),
('24uploading.com', '24Uploading', 'resolve_24uploading'),
('xvidstage.com', 'XVIDStage', 'resolve_xvidstage'),
('2shared.com', '2Shared', 'SHARED2_HANDLER')
]
try:
if is_domain:
hoster = re.search('https?://[www\.]*([^/]+)/', search_string)
if not hoster:
return None
domain = hoster.group(1)
host_index = [y[0] for y in host_list].index(domain)
else:
host_index = [y[1].lower() for y in host_list].index(search_string)
return host_list[host_index]
except Exception, e:
addon.log_error('Error determining source: %s' % e)
return None
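#PART: build the directory entries for 'Source #<sourcenumber>' on a mirror
#page. Multi-part sources get one entry per part (or a single 'Multiple Parts'
#entry when the stack-multi-part setting is on), with the per-part values
#cached under the "source<N>parts" key for stacked playback; single-link
#sources are added as one 'Full' entry. An '*RD' tag is appended when the
#host appears in the Real-Debrid host list.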
def PART(scrap, sourcenumber, host, args, source_tag, ice_meta=None, video_url=None, debrid_hosts=None):
#check if source exists
sourcestring='Source #'+sourcenumber
checkforsource = re.search(sourcestring, scrap)
#if source exists proceed.
if checkforsource:
hoster = determine_source(host)
debrid_tag = ''
if debrid_hosts:
if hoster[0] in debrid_hosts:
debrid_tag = ' [COLOR yellow]*RD[/COLOR] '
#check if source contains multiple parts
multiple_part = re.search('<p>Source #'+sourcenumber+':', scrap)
if multiple_part:
addon.log_debug(sourcestring+' has multiple parts')
#get all text under source if it has multiple parts
multi_part_source=re.compile('<p>Source #'+sourcenumber+': (.+?)PART 1(.+?)</i><p>').findall(scrap)
#put scrape back together
for sourcescrape1,sourcescrape2 in multi_part_source:
scrape=sourcescrape1 + 'PART 1' + sourcescrape2
pair = re.compile("onclick='go\((\d+)\)'>PART\s+(\d+)").findall(scrape)
for id, partnum in pair:
#hoster = determine_source(host)
if hoster:
partname='Part '+ partnum
fullname=sourcestring + ' | ' + hoster[1] + debrid_tag + ' | ' + source_tag + partname
try:
sources = eval(cache.get("source"+str(sourcenumber)+"parts"))
except:
sources = {partnum: url}
addon.log_debug('sources havent been set yet...' )
sources[partnum] = url
cache.delete("source"+str(sourcenumber)+"parts")
cache.set("source"+str(sourcenumber)+"parts", repr(sources))
stacked = str2bool(addon.get_setting('stack-multi-part'))
if stacked and partnum == '1':
fullname = fullname.replace('Part 1', 'Multiple Parts')
addExecute(fullname, args, get_default_action(), ice_meta, stacked, video_url=video_url)
elif not stacked:
addExecute(fullname, args, get_default_action(), ice_meta, video_url=video_url)
# if source does not have multiple parts...
else:
# find corresponding '<a rel=?' entry and add as a one-link source
source5=re.compile('<a\s+rel='+sourcenumber+'.+?onclick=\'go\((\d+)\)\'>Source\s+#'+sourcenumber+':').findall(scrap)
for id in source5:
#hoster = determine_source(host)
if hoster:
fullname=sourcestring + ' | ' + hoster[1] + debrid_tag + source_tag + ' | Full '
addExecute(fullname, args, get_default_action(), ice_meta, video_url=video_url)
def SOURCE(page, sources, source_tag, ice_meta=None, video_url=None):
# get settings
# extract the ingredients used to generate the XHR request
#
# set here:
#
# sec: secret identifier: hardwired in the JS
# t: token: hardwired in the JS
# id: source ID in the link's onclick attribute (extracted in PART)
#
# set in GetSource:
#
# iqs: not used?
# url: not used?
# cap: form field for recaptcha? - always set to empty in the JS
# m: starts at 0, decremented each time a mousemove event is fired e.g. -123
# s: seconds since page loaded (> 5, < 250)
args = {}
match = re.search('lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?', page)
args['sec'] = ''.join(match.groups(''))
args['t'] = re.search('"&t=([^"]+)",', page).group(1)
args['s'] = re.search('(?:\s+|,)s\s*=(\d+)', page).group(1)
args['m'] = re.search('(?:\s+|,)m\s*=(\d+)', page).group(1)
#add cached source
vidname=cache.get('videoname')
dlDir = Get_Path("noext", "", "")
listitem=Item_Meta(vidname)
try:
fdirs, fnames = xbmcvfs.listdir(dlDir)
for fname in fnames:
match = re.match(re.escape(vidname)+' *(.*)\.avi$', fname)
if match is not None:
if xbmcvfs.exists(os.path.join(dlDir,fname)+'.dling'):
listitem.setLabel("Play Downloading "+match.group(0))
addDownloadControls(match.group(0),os.path.join(dlDir,fname), listitem)
else:
listitem.setLabel("Play Local File" + match.group(0))
addLocal("Play Local File " + match.group(0), os.path.join(dlDir,fname), listitem)
except:
pass
#Find all hosts
debrid_hosts = None
debrid_account = str2bool(addon.get_setting('realdebrid-account'))
if debrid_account:
rd = debridroutines.RealDebrid()
try: debrid_hosts = rd.get_hosts()
except Exception, e:
addon.log_error(e)
pass
hosts = re.findall('<a\s+rel=[0-9]+.+?onclick=\'go\((\d+)\)\'>Source\s+#([0-9]+): (<span .+?</span>)</a>', sources)
for id, number, hoster in hosts:
host = re.sub('</span>', '', re.sub('<span .+?>', '', hoster)).lower()
args['id'] = id
PART(sources, number, host, args, source_tag, ice_meta, video_url, debrid_hosts)
setView(None, 'default-view')
def show_ice_ad(ad_url, referrer):
try:
headers = {'Referer': referrer}
# Import PyXBMCt module.
import pyxbmct.addonwindow as pyxbmct
# Create a window instance.
window = pyxbmct.AddonDialogWindow('Icefilms Advertisement')
# Set the window width, height, rows, columns.
window.setGeometry(450, 250, 6, 4)
if not ad_url.startswith('http:'): ad_url = 'http:' + ad_url
addon.log_debug('Found Ice advertisement url: %s' % ad_url)
html = net.http_GET(ad_url, headers=headers).content
for match in re.finditer("<img\s+src='([^']+)'\s+width='(\d+)'\s+height='(\d+)'", html):
img_url, width, height = match.groups()
addon.log_debug('Ice advertisement image url: %s' % img_url)
width = int(width)
height = int(height)
if width > 0 and height > 0:
# Ad image
image = pyxbmct.Image(img_url)
window.placeControl(image, 0, 0, rowspan=4, columnspan=4)
else:
temp = net.http_GET(img_url, headers=headers).content
# Create a button.
button = pyxbmct.Button('Close')
# Place the button on the window grid.
window.placeControl(button, 5, 1, columnspan=2)
# Set initial focus on the button.
window.setFocus(button)
# Connect the button to a function.
window.connect(button, window.close)
# Connect a key action to a function.
window.connect(pyxbmct.ACTION_NAV_BACK, window.close)
# Show the created window.
window.doModal()
match = re.search("href='([^']+)", html)
if match and random.randint(0, 100) < 5:
addon.log_debug('Ice advertisement - performing click on ad: %s' % match.group(1))
html = net.http_GET(match.group(1)).content
match = re.search("location=decode\('([^']+)", html)
if match:
html = net.http_GET(match.group(1)).content
    except:
        pass
    finally:
        #window is only defined if the dialog was created successfully
        try:
            window.close()
        except NameError:
            pass
def GetURL(url, params = None, referrer = ICEFILMS_REFERRER, use_cookie = False, save_cookie = False, use_cache=True):
addon.log_debug('GetUrl: ' + url)
addon.log_debug('params: ' + repr(params))
addon.log_debug('referrer: ' + repr(referrer))
addon.log_debug('cookie: ' + repr(use_cookie))
addon.log_debug('save_cookie: ' + repr(save_cookie))
headers = {
'Referer': referrer,
'Accept': ACCEPT,
'User-Agent': USER_AGENT
}
page_cache = str2bool(addon.get_setting('use_page_cache'))
try:
if page_cache and use_cache:
html = db_connection.get_cached_url(url, 10)
if html:
addon.log_debug("Cached URL found for: %s" % url)
return html
else:
addon.log_debug("No cache found for: %s" % url)
if use_cookie:
net.set_cookies(ice_cookie)
addon.log_debug("Cookie set")
if params:
html = net.http_POST(url, params, headers=headers).content
else:
html = net.http_GET(url, headers=headers).content
if page_cache and use_cache:
db_connection.cache_url(url, html)
if save_cookie:
net.save_cookies(ice_cookie)
addon.log_debug("Cookie saved")
except Exception, e:
addon.log_error('****** ERROR: %s' % e)
Notify('big','Error Requesting Site','An error has occured communicating with Icefilms', '', '', 'Check your internet connection and the Icefilms site.' )
html = ''
pass
return html
############################################
## Helper Functions
############################################
#Quick helper function used to strip characters that are invalid for Windows filenames/folders
def Clean_Windows_String(string):
return re.sub('[^\w\-_\. ]', '', string)
#Helper function to convert strings to boolean values
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
#Int parse
def intTryParse(value):
try:
return int(value)
except ValueError:
return 0
def Get_Path(srcname, vidname, link):
##Gets the path the file will be downloaded to, and if necessary makes the folders##
#clean video name of unwanted characters
vidname = Clean_Windows_String(vidname)
#Get file extension from url
if link:
download_url = re.search('(^https?://[^|]*)', link).group(1)
else:
download_url = ''
import urlparse
path = urlparse.urlparse(download_url).path
ext = os.path.splitext(path)[1]
if xbmcvfs.exists(downloadPath):
#if source is split into parts, attach part number to the videoname.
if re.search('Part',srcname) is not None:
srcname=(re.split('\|+', srcname))[-1]
vidname=vidname + ' part' + ((re.split('\ +', srcname))[-1])
#add file extension
vidname = vidname + ext
    elif srcname != "noext":
#add file extension
vidname = vidname + ext
#is use special directory structure set to true?
SpecialDirs=addon.get_setting('use-special-structure')
if SpecialDirs == 'true':
mediapath=os.path.normpath(cache.get('mediapath'))
mediapath=os.path.join(downloadPath, mediapath)
if not xbmcvfs.exists(mediapath):
try:
xbmcvfs.mkdir(mediapath)
except Exception, e:
addon.log_error('Failed to create media path: %s' % mediapath)
addon.log_error('With error: %s' % e)
pass
finalpath=os.path.join(mediapath,vidname)
return finalpath
elif SpecialDirs == 'false':
mypath=os.path.join(downloadPath,vidname)
return mypath
else:
return 'path not set'
def Item_Meta(name, resume_point=0):
#Set metadata for playing video - allows trakt and scrobbling
#Also shows metadata when hitting Info button while playing
thumb_img = xbmc.getInfoImage('ListItem.Thumb')
vid_year = xbmc.getInfoLabel('ListItem.Year')
vid_plot = xbmc.getInfoLabel('ListItem.Plot')
plot_outline = xbmc.getInfoLabel('ListItem.PlotOutline')
mpaa = xbmc.getInfoLabel('ListItem.Mpaa')
#set name and description, unicode cleaned.
try: open_vidname=cache.get('videoname')
except:
vidname = ''
addon.log_error('OPENING VIDNAME FAILED!')
else:
try: get_vidname = htmlcleaner.clean(open_vidname)
except:
            addon.log_error('CLEANING VIDNAME FAILED!: %s' % open_vidname)
vidname = open_vidname
else: vidname = get_vidname
listitem = xbmcgui.ListItem(name)
video = get_video_name(vidname)
if video_type == 'movie':
listitem.setInfo(type="Video", infoLabels={'title': video['name'], 'year': vid_year, 'type': 'movie', 'plotoutline': plot_outline, 'plot': vid_plot, 'mpaa': mpaa})
if video_type == 'episode':
show = cache.get('tvshowname')
show = get_video_name(show)
ep_num = intTryParse(episode_num)
episode_season = intTryParse(season_num)
listitem.setInfo('video', {'title': video['name'], 'tvshowtitle': show['name'], 'year': vid_year, 'episode': episode_num, 'season': episode_season, 'type': 'episode', 'plotoutline': plot_outline, 'plot': vid_plot, 'mpaa': mpaa})
listitem.setProperty('StartOffset', str(resume_point))
listitem.setThumbnailImage(thumb_img)
return listitem
def handle_wait(time_to_wait,title,text):
addon.log_debug('Waiting '+str(time_to_wait)+' secs')
pDialog = xbmcgui.DialogProgress()
ret = pDialog.create(' '+title)
secs=0
percent=0
increment = float(100) / time_to_wait
increment = int(round(increment))
cancelled = False
while secs < time_to_wait:
secs = secs + 1
percent = increment*secs
secs_left = str((time_to_wait - secs))
remaining_display = ' Wait '+secs_left+' seconds for the video stream to activate...'
pDialog.update(percent,' '+ text, remaining_display)
xbmc.sleep(1000)
if (pDialog.iscanceled()):
cancelled = True
break
if cancelled == True:
addon.log_debug('Wait Cancelled')
return False
else:
addon.log_debug('Done Waiting')
return True
def Handle_Vidlink(url):
#Determine who our source is, grab all needed info
hoster = determine_source(url, is_domain=True)
#Using real-debrid to get the generated premium link
debrid_account = str2bool(addon.get_setting('realdebrid-account'))
link = None
if debrid_account:
rd = debridroutines.RealDebrid()
if rd.valid_host(hoster[0]):
if addon.get_setting('realdebrid_token'):
link = rd.get_media_url(url)
if not link:
Notify('big','Real-Debrid','Error occurred attempting to stream the file.', '', '', line3 = '**Attempting to resolve with original host instead..')
link = None
else:
addon.log_debug('Real-Debrid Link resolved: %s ' % link)
return link
if not link:
# Resolvers - Custom to Icefilms
import resolvers
#Dynamic call to proper resolve function returned from determine_source()
return getattr(resolvers, "%s" % hoster[2])(url)
def PlayFile(name,url):
listitem=Item_Meta(name)
addon.log_debug('Attempting to play local file')
try:
#directly call xbmc player (provides more options)
play_with_watched(url, listitem, '')
#xbmc.Player( xbmc.PLAYER_CORE_DVDPLAYER ).play( url, listitem )
except:
addon.log_error('local file playing failed')
def GetSource():
t = addon.queries.get('t', '')
id = addon.queries.get('id', '')
params = {
'iqs': '',
'url': '',
'cap': ' ',
'sec': addon.queries.get('sec', ''),
't': t,
'id': id,
'm' : int(addon.queries.get('m', '')) + random.randrange(20, 500),
's' : int(addon.queries.get('s', '')) + random.randrange(2, 500)
}
body = GetURL(ICEFILMS_AJAX % (id, t), params = params, referrer = ICEFILMS_AJAX_REFER % t, use_cookie=True, use_cache=False)
addon.log_debug('GetSource Response: %s' % body)
source = re.search('url=(http[^&]+)', body)
if source:
url = urllib.unquote(source.group(1))
else:
addon.log_debug('GetSource - URL String not found')
url = ''
addon.log_debug('GetSource URL: %s' % url)
return url
def get_resume_choice(video_id):
question = 'Resume from %s' % (format_time(db_connection.get_bookmark(video_id)))
return xbmcgui.Dialog().yesno('Resume?', question, '', '', 'Start from beginning', 'Resume') == 1
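#format_time: convert a number of seconds into an HH:MM:SS string (the hours
#field stays at 00 for durations of an hour or less).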
def format_time(seconds):
minutes, seconds = divmod(seconds, 60)
if minutes > 60:
hours, minutes = divmod(minutes, 60)
return "%02d:%02d:%02d" % (hours, minutes, seconds)
else:
return "00:%02d:%02d" % (minutes, seconds)
def Stream_Source(name, download_play=False, download=False, download_jdownloader=False, stacked=False):
#Grab actual source url
url = GetSource()
addon.log_debug('Entering Stream Source with options - Name: %s Url: %s DownloadPlay: %s Download: %s Stacked: %s' % (name, url, download_play, download, stacked))
    global callEndOfDirectory
    callEndOfDirectory = False
resume = False
use_resume = str2bool(addon.get_setting('resume-support'))
if use_resume:
if db_connection.bookmark_exists(video_url):
resume = get_resume_choice(video_url)
resume_point = 0
if resume:
resume_point = db_connection.get_bookmark(video_url)
addon.log_debug('Resuming video at: %s' % resume_point)
vidname=cache.get('videoname')
mypath = Get_Path(name, vidname, url)
listitem = Item_Meta(name, resume_point)
video_seeking = str2bool(addon.get_setting('video-seeking'))
last_part = False
current_part = 1
resume_threshhold = int(addon.get_setting('resume-threshhold'))
while not last_part:
#If it's a stacked source, grab url one by one
if stacked == True:
addon.log_debug('I AM STACKED')
url = get_stacked_part(name, str(current_part))
if url:
current_part += 1
#Check to see if it is the last part by attempting to grab the next
next_url = get_stacked_part(name, str(current_part))
if not next_url:
last_part = True
else:
last_part = True
break
else:
last_part = True
#Grab the final playable link
try:
link = Handle_Vidlink(url)
if link == None:
callEndOfDirectory = False
break
except Exception, e:
addon.log_error('**** Stream error: %s' % e)
Notify('big','Invalid Source','Unable to play selected source. \n Please try another.','', line3=str(e))
break
#Download & Watch
if download_play:
addon.log_debug('Starting Download & Play')
completed = Download_And_Play(name, link, video_seek=False)
addon.log_debug('Download & Play streaming completed: %s' % completed)
#Download option
elif download:
addon.log_debug('Starting Download')
completed = Download_Source(name, link, url)
addon.log_debug('Downloading completed: %s' % completed)
elif download_jdownloader:
addon.log_debug('Sent %s to JDownloader' % link)
xbmc.executebuiltin('XBMC.RunPlugin("plugin://plugin.program.jdownloader/?action=addlink&url=%s")' % (link))
Notify('Download Alert','Sent '+vidname+' to JDownloader','','')
completed = True
#Download & Watch - but delete file when done, simulates streaming and allows video seeking
#elif video_seeking:
# addon.log_debug('Starting Video Seeking')
# completed = Download_And_Play(name,link, video_seek=video_seeking)
# addon.log_debug('Video Seeking streaming completed: %s' % completed)
# CancelDownload(name, video_seek=video_seeking)
#Else play the file as normal stream
else:
addon.log_debug('Starting Normal Streaming')
completed = play_with_watched(link, listitem, mypath, last_part, resume_point, resume_threshhold)
addon.log_debug('Normal streaming completed: %s' % completed)
#Check if video was played until end - else assume user stopped watching video so break from loop
if not completed:
break
def play_with_watched(url, listitem, mypath, last_part=False, resume_point=0, resume_threshhold=1):
global currentTime
global totalTime
global watched_percent
global finalPart
finalPart = last_part
watched_percent = get_watched_percent()
useAxel = addon.get_setting('axel-proxy')
axelhelper = None
download_id = None
if useAxel == 'true':
import axelproxy as proxy
axelhelper = proxy.ProxyHelper()
url, download_id = axelhelper.create_proxy_url(url)
enable_recent = str2bool(addon.get_setting('recent-watched'))
mplayer = MyPlayer(axelhelper=axelhelper, download_id=download_id, ice_url=video_url, imdbid = imdbnum, season = season_num, episode=episode_num, resume_point=resume_point, resume_threshhold=resume_threshhold, enableRecent=enable_recent)
mplayer.play(url, listitem)
try:
video_time = mplayer.getTotalTime()
except Exception:
xbmc.sleep(20000) #wait 20 seconds until the video is playing before getting totalTime
try:
video_time = mplayer.getTotalTime()
except Exception, e:
addon.log_error('Error grabbing video time: %s' % e)
return False
#For stacked parts totalTime will need to be added up
temp_total = totalTime
totalTime = totalTime + video_time
addon.log_debug('******** VIDEO TIME: %s' % video_time)
addon.log_debug('******** TOTAL TIME: %s' % totalTime)
while(1):
try:
temp_current_time = mplayer.getTime()
currentTime= int(temp_current_time + temp_total)
except Exception:
addon.log_error('Kodi is not currently playing a media file')
break
xbmc.sleep(1000)
addon.log_debug('******** CURRENT TIME: %s' % currentTime)
#Check if video was played until the end (-1 second)
if temp_current_time < (video_time - 1):
return False
else:
return True
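#get_watched_percent: map the 'watched-percent' setting (index 0/1/2) to the
#fraction of a video (70%/80%/90%) that must be played before it is
#automatically marked as watched.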
def get_watched_percent():
watched_values = [.7, .8, .9]
return watched_values[int(addon.get_setting('watched-percent'))]
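#get_stacked_part: return the cached entry for part <part> of a stacked
#source. The source number is taken from the entry name ('Source #N ...') and
#the parts were cached by PART under the "source<N>parts" key; returns None
#when the requested part does not exist.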
def get_stacked_part(name, part):
sourcenumber = name[8:9]
source = eval(cache.get("source"+str(sourcenumber)+"parts"))
addon.log_debug('**** Stacked parts: %s' % source)
try:
url=source[part]
addon.log_debug('**** Stacked Part returning part #%s: %s' % (part, url))
return url
except:
addon.log_error('No more parts found')
return None
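#MyPlayer wraps xbmc.Player so playback events can be observed: when a video
#ends or is stopped it works out the percentage watched, marks the item
#watched, clears its bookmark and removes it from the queue once the watched
#threshold is passed, sets a resume bookmark otherwise, and records the item
#in the recently watched list. It also stops any Axel proxy download tied to
#the stream.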
class MyPlayer (xbmc.Player):
def __init__ (self, axelhelper=None, download_id=None, ice_url=None, imdbid=None, season=None, episode=None, resume_point=0, resume_threshhold=1, enableRecent=False):
self.dialog = None
self.axelhelper = axelhelper
self.download_id = download_id
self.ice_url = ice_url
self.imdbid = imdbid
self.season = season
self.episode = episode
self.seek_time = resume_point
self.resume_threshhold = resume_threshhold
self.enableRecent = enableRecent
xbmc.Player.__init__(self)
addon.log_debug('Initializing myPlayer...')
def play(self, url, listitem):
addon.log_debug('Now im playing... %s' % url)
xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(url, listitem)
def isplaying(self):
xbmc.Player.isPlaying(self)
def onPlayBackEnded(self):
global currentTime
global totalTime
global finalPart
#Stop Axel Downloader from running
if self.download_id:
self.axelhelper.stop_download(self.download_id)
if finalPart:
try: percentWatched = currentTime / totalTime
except: percentWatched = 0
addon.log_debug('current time: ' + str(currentTime) + ' total time: ' + str(totalTime) + ' percent watched: ' + str(percentWatched))
vidname=cache.get('videoname')
video = get_video_name(vidname)
if percentWatched >= watched_percent:
#set watched
addon.log_debug('Auto-Watch - Setting %s to watched' % video)
ChangeWatched(imdbnum, video_type, video['name'], season_num, episode_num, video['year'], watched=7)
#Clear bookmark
db_connection.clear_bookmark(self.ice_url)
#remove from Queue list
self.removeQueue(video)
# Set recently watched
self.setRecentWatched(video)
def onPlayBackStopped(self):
global currentTime
global totalTime
global finalPart
#Stop Axel Downloader from running
if self.download_id:
self.axelhelper.stop_download(self.download_id)
if finalPart:
try: percentWatched = currentTime / totalTime
except: percentWatched = 0
addon.log_debug('Playback stopped - current time: ' + str(currentTime) + ' total time: ' + str(totalTime) + ' percent watched: ' + str(percentWatched))
vidname=cache.get('videoname')
video = get_video_name(vidname)
if percentWatched >= watched_percent and totalTime > 1:
#set watched
addon.log_debug('Auto-Watch - Setting %s to watched' % video )
ChangeWatched(imdbnum, video_type, video['name'], season_num, episode_num, video['year'], watched=7)
#Clear bookmark
db_connection.clear_bookmark(self.ice_url)
#remove from Queue list
self.removeQueue(video)
elif currentTime >= (self.resume_threshhold * 60):
addon.log_debug('Setting resume bookmark: %s' % currentTime)
db_connection.set_bookmark(self.ice_url, currentTime)
# Set recently watched
self.setRecentWatched(video)
def setRecentWatched(self, video):
if self.enableRecent:
addon.log_debug('Setting recently watched: %s' % video['name'])
db_connection.set_watched(self.ice_url, video_type, video['name'], video['year'], self.season, self.episode, self.imdbid)
def removeQueue(self, video):
addon.log_debug('Removing watched Queue item: %s' % video['name'])
db_connection.clear_queue(self.ice_url)
############## End MyPlayer Class ################
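#DownloadThread downloads a url to dest in the background with
#urllib.urlretrieve, reporting progress through _dlhook below. A '<dest>.dling'
#marker file and a 'Downloading' file in the download folder flag the transfer
#as in progress; both are removed when the download finishes or is
#interrupted, and partially downloaded files are deleted when the
#delete-incomplete-downloads setting is enabled.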
class DownloadThread (threading.Thread):
def __init__(self, url, dest, vidname=False, video_seek=False):
self.url = url
self.dest = dest
self.vidname = vidname
self.video_seek = video_seek
self.dialog = None
threading.Thread.__init__(self)
def run(self):
#save the thread id to a .tid file. This file can then be read if the user navigates away from the
#download info page to get the thread ID again and generate the download info links
#the tid file will also denote a download in progress
#Note: if xbmc is killed during a download, the tid file will remain, therefore:
#TODO: add remove incomplete download link
save(self.dest + '.dling', 'dling')
#get settings
save(os.path.join(downloadPath,'Downloading'),self.dest+'\n'+self.vidname)
delete_incomplete = addon.get_setting('delete-incomplete-downloads')
start_time = time.time()
try:
urllib.urlretrieve(self.url, self.dest, lambda nb, bs, fs: _dlhook(nb, bs, fs, self, start_time))
if os.path.getsize(self.dest) < 10000:
addon.log_debug('Got a very small file')
raise SmallFile('Small File')
            if self.dialog != None:
self.dialog.close()
self.dialog = None
addon.log_debug('Download finished successfully')
try:
xbmcvfs.delete(self.dest + '.dling')
except:
pass
xbmcvfs.delete(os.path.join(downloadPath,'Downloading'))
except:
            if self.dialog != None:
self.dialog.close()
self.dialog = None
addon.log_debug('Download interrupted')
xbmcvfs.delete(os.path.join(downloadPath,'Downloading'))
#download is killed so remove .dling file
try:
xbmcvfs.delete(self.dest + '.dling')
except:
pass
if delete_incomplete == 'true':
#delete partially downloaded file if setting says to.
while xbmcvfs.exists(self.dest):
try:
xbmcvfs.delete(self.dest)
break
except:
pass
if sys.exc_info()[0] in (StopDownloading,) and not self.video_seek:
Notify('big','Download Canceled','Download has been canceled','')
else:
raise
def show_dialog(self):
self.dialog = xbmcgui.DialogProgress()
self.dialog.create('Downloading', '', self.vidname)
def hide_dialog(self):
self.dialog.close()
self.dialog = None
############## End DownloadThread Class ################
class StopDownloading(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class SmallFile(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def Download_And_Play(name,url, video_seek=False):
#get proper name of vid
vidname=cache.get('videoname')
mypath=Get_Path(name, vidname, url)
addon.log_debug('MYPATH: %s' % mypath)
if mypath == 'path not set':
Notify('Download Alert','You have not set the download folder.\n Please access the addon settings and set it.','','')
return False
if xbmcvfs.exists(os.path.join(downloadPath, 'Ping')):
xbmcvfs.rmdir(os.path.join(downloadPath, 'Ping'))
if xbmcvfs.exists(os.path.join(downloadPath, 'Alive')):
xbmcvfs.rmdir(os.path.join(downloadPath, 'Alive'))
if xbmcvfs.exists(os.path.join(downloadPath, 'Downloading')):
fhPing = open(os.path.join(downloadPath, 'Ping'), 'w')
fhPing.close()
xbmc.sleep(1000)
if xbmcvfs.exists(os.path.join(downloadPath, 'Alive')):
fh = open(os.path.join(downloadPath, 'Alive'))
filePathAlive = fh.readline().strip('\n')
fileNameAlive = fh.readline().strip('\n')
fh.close()
try:
xbmcvfs.rmdir(os.path.join(downloadPath, 'Alive'))
except:
pass
Notify('Download Alert','Currently downloading '+fileNameAlive,'','')
addDownloadControls(fileNameAlive, filePathAlive)
return False
else:
xbmcvfs.rmdir(os.path.join(downloadPath, 'Ping'))
delete_incomplete = addon.get_setting('delete-incomplete-downloads')
if delete_incomplete == 'true':
if xbmcvfs.exists(os.path.join(downloadPath, 'Downloading')):
fh = open(os.path.join(downloadPath, 'Downloading'))
filePathDownloading = fh.readline().strip('\n')
fh.close()
try:
xbmcvfs.rmdir(filePathDownloading)
except:
pass
try:
xbmcvfs.rmdir(filePathDownloading + '.dling')
except:
pass
if xbmcvfs.exists(os.path.join(downloadPath, 'Downloading')):
xbmcvfs.rmdir(os.path.join(downloadPath, 'Downloading'))
if os.path.isfile(mypath) is True:
if os.path.isfile(mypath + '.dling'):
try:
xbmcvfs.delete(mypath)
xbmcvfs.delete(mypath + '.dling')
except:
addon.log_error('download failed: existing incomplete files cannot be removed')
return False
else:
Notify('Download Alert','The video you are trying to download already exists!','','')
addon.log_debug('Attempting to download and play file')
try:
addon.log_debug("Starting Download Thread")
dlThread = DownloadThread(url, mypath, vidname, video_seek)
dlThread.start()
buffer_delay = int(addon.get_setting('buffer-delay'))
        #assumes handle_wait returns False when the user cancels the buffering wait
        wait_ok = handle_wait(buffer_delay, "Buffering", "Waiting a bit before playing...")
        if not wait_ok:
            return False
if xbmcvfs.exists(mypath):
if dlThread.isAlive():
listitem=Item_Meta(name)
#Play file
completed = play_with_watched(mypath, listitem, '')
if video_seek:
if xbmcvfs.exists(mypath):
try:
xbmcvfs.delete(mypath)
except:
addon.log_error('Failed to delete file after video seeking')
else:
addDownloadControls(name,mypath, listitem)
#Return if video was played until the end
if not completed:
return False
else:
return True
else:
raise
else:
raise
except Exception, e:
addon.log_error('EXCEPTION %s' % e)
if sys.exc_info()[0] in (urllib.ContentTooShortError,):
Notify('big','Download and Play failed!','Error: Content Too Short','')
if sys.exc_info()[0] in (OSError,):
Notify('big','Download and Play failed!','Error: Cannot write file to disk','')
if sys.exc_info()[0] in (SmallFile,):
Notify('big','Download and Play failed!','Error: Got a file smaller than 10KB','')
callEndOfDirectory = False
def _dlhook(numblocks, blocksize, filesize, dt, start_time):
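    #urllib.urlretrieve calls this reporthook with (block count, block size, total file size);
    #percent complete, MB downloaded, average speed and ETA are derived from those values plus
    #the start_time captured when the transfer began.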
if dt.dialog != None:
try:
percent = min(numblocks * blocksize * 100 / filesize, 100)
currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
kbps_speed = numblocks * blocksize / (time.time() - start_time)
if kbps_speed > 0:
eta = (filesize - numblocks * blocksize) / kbps_speed
else:
eta = 0
kbps_speed = kbps_speed / 1024
total = float(filesize) / (1024 * 1024)
mbs = '%.02f MB of %.02f MB' % (currently_downloaded, total)
e = 'Speed: %.02f Kb/s ' % kbps_speed
e += 'ETA: %02d:%02d' % divmod(eta, 60)
dt.dialog.update(percent, mbs, e)
except:
percent = 100
dt.dialog.update(percent)
if dt.dialog.iscanceled():
dt.hide_dialog()
elif xbmcvfs.exists(os.path.join(downloadPath, 'ShowDLInfo')):
while xbmcvfs.exists(os.path.join(downloadPath, 'ShowDLInfo')):
try:
xbmcvfs.rmdir(os.path.join(downloadPath, 'ShowDLInfo'))
except:
continue
break
dt.show_dialog()
elif xbmcvfs.exists(os.path.join(downloadPath, 'Cancel')):
while xbmcvfs.exists(os.path.join(downloadPath, 'Cancel')):
try:
xbmcvfs.rmdir(os.path.join(downloadPath, 'Cancel'))
except:
continue
break
addon.log_debug("Stopping download")
raise StopDownloading('Stopped Downloading')
elif xbmcvfs.exists(os.path.join(downloadPath, 'Ping')):
while xbmcvfs.exists(os.path.join(downloadPath, 'Ping')):
try:
xbmcvfs.rmdir(os.path.join(downloadPath, 'Ping'))
except:
continue
break
save(os.path.join(downloadPath,'Alive'),dt.dest+'\n'+dt.vidname)
def Download_Source(name, url, referer, stacked=False):
#get proper name of vid
vidname=cache.get('videoname')
mypath=Get_Path(name, vidname, url)
if mypath == 'path not set':
Notify('Download Alert','You have not set the download folder.\n Please access the addon settings and set it.','','')
return False
else:
if os.path.isfile(mypath) is True:
Notify('Download Alert','The video you are trying to download already exists!','','')
return False
else:
import commondownloader
download_url = re.search('(^https?://[^|]*)', url).group(1)
commondownloader.download(download_url, mypath, 'Icefilms', referer=referer, agent=USER_AGENT)
#commondownloader.download(url, mypath, 'Icefilms', referer=referer, agent=USER_AGENT)
# DownloadInBack=addon.get_setting('download-in-background')
# addon.log_debug('attempting to download file, silent = '+ DownloadInBack)
# try:
# if DownloadInBack == 'true':
# completed = QuietDownload(url, mypath, vidname)
# return completed
# else:
# completed = Download(url, mypath, vidname)
# return completed
# except:
# addon.log_error('download failed')
# return False
def Kill_Streaming(name,url):
xbmc.Player().stop()
class StopDownloading(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def Download(url, dest, displayname=False):
if displayname == False:
displayname=url
dp = xbmcgui.DialogProgress()
dp.create('Downloading', '', displayname)
start_time = time.time()
try:
urllib.urlretrieve(url, dest, lambda nb, bs, fs: _pbhook(nb, bs, fs, dp, start_time))
except:
delete_incomplete = addon.get_setting('delete-incomplete-downloads')
if delete_incomplete == 'true':
#delete partially downloaded file if setting says to.
while xbmcvfs.exists(dest):
try:
xbmcvfs.delete(dest)
break
except:
pass
#only handle StopDownloading (from cancel), ContentTooShort (from urlretrieve), and OS (from the race condition); let other exceptions bubble
if sys.exc_info()[0] in (urllib.ContentTooShortError, StopDownloading, OSError):
return False
else:
raise
return False
return True
def QuietDownload(url, dest, videoname):
#quote parameters passed to download script
q_url = urllib.quote_plus(url)
q_dest = urllib.quote_plus(dest)
q_vidname = urllib.quote_plus(videoname)
#Create possible values for notification
notifyValues = [2, 5, 10, 20, 25, 50, 100]
# get notify value from settings
NotifyPercent=int(addon.get_setting('notify-percent'))
try:
script = os.path.join( icepath, 'resources', 'lib', "DownloadInBackground.py" )
xbmc.executebuiltin( "RunScript(%s, %s, %s, %s, %s)" % ( script, q_url, q_dest, q_vidname, str(notifyValues[NotifyPercent]) ) )
return True
except Exception, e:
addon.log_error('*** Error in Quiet Download: %s' % e)
return False
def _pbhook(numblocks, blocksize, filesize, dp, start_time):
try:
percent = min(numblocks * blocksize * 100 / filesize, 100)
currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
kbps_speed = numblocks * blocksize / (time.time() - start_time)
if kbps_speed > 0:
eta = (filesize - numblocks * blocksize) / kbps_speed
else:
eta = 0
kbps_speed = kbps_speed / 1024
total = float(filesize) / (1024 * 1024)
mbs = '%.02f MB of %.02f MB' % (currently_downloaded, total)
e = 'Speed: %.02f Kb/s ' % kbps_speed
e += 'ETA: %02d:%02d' % divmod(eta, 60)
dp.update(percent, mbs, e)
except:
percent = 100
dp.update(percent)
if dp.iscanceled():
dp.close()
raise StopDownloading('Stopped Downloading')
def addExecute(name, args, mode, ice_meta, stacked=False, video_url=None):
# A list item that executes the next mode, but doesn't clear the screen of current list items.
#encode url and name, so they can pass through the sys.argv[0] related strings
sysname = urllib.quote_plus(name)
sysurl = urllib.quote_plus(ICEFILMS_AJAX)
argsenc = urllib.urlencode(args)
u = sys.argv[0] + "?url=" + sysurl + "&mode=" + str(mode) + "&name=" + sysname + "&imdbnum=" + urllib.quote_plus(str(imdbnum)) + "&videoType=" + str(video_type) + "&season=" + str(season_num) + "&episode=" + str(episode_num) + "&stackedParts=" + str(stacked) + "&" + str(argsenc) + '&videoUrl=' + urllib.quote_plus(video_url)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=ice_meta['poster'])
liz.setInfo( type="Video", infoLabels={ "Title": name, 'year': ice_meta['year'], 'type': 'movie', 'plotoutline': ice_meta['plot_outline'], 'plot': ice_meta['plot'], 'mpaa': ice_meta['mpaa']})
liz.setProperty('totalTime', '1' )
liz.setProperty('resumeTime', '0')
#handle adding context menus
contextMenuItems = []
contextMenuItems.append(('Play Stream', 'XBMC.RunPlugin(%s?mode=200&name=%s&url=%s&stackedParts=%s&%s)' % (sys.argv[0], sysname, sysurl, stacked, argsenc)))
contextMenuItems.append(('Download', 'XBMC.RunPlugin(%s?mode=201&name=%s&url=%s&stackedParts=%s&%s)' % (sys.argv[0], sysname, sysurl, stacked, argsenc)))
contextMenuItems.append(('Download And Watch', 'XBMC.RunPlugin(%s?mode=206&name=%s&url=%s&stackedParts=%s&%s)' % (sys.argv[0], sysname, sysurl, stacked, argsenc)))
contextMenuItems.append(('Download with jDownloader', 'XBMC.RunPlugin(%s?mode=202&name=%s&url=%s&stackedParts=%s&%s)' % (sys.argv[0], sysname, sysurl, stacked, argsenc)))
liz.addContextMenuItems(contextMenuItems, replaceItems=True)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
return ok
def addDir(name, url, mode, iconimage, meta=False, imdb=False, delfromfav=False, disablefav=False, searchMode=False, totalItems=0, disablewatch=False, meta_install=False, favourite=False, recentWatched=False, queueList=False):
### addDir with context menus and meta support ###
#encode url and name, so they can pass through the sys.argv[0] related strings
sysname = urllib.quote_plus(name.encode('utf8'))
sysurl = urllib.quote_plus(url.encode('utf8'))
dirmode=mode
#get nice unicode name text.
#name has to pass through lots of weird operations earlier in the script,
#so it should only be unicodified just before it is displayed.
name = htmlcleaner.clean(name)
#handle adding context menus
contextMenuItems = []
if mode == 12: # TV series
videoType = 'tvshow'
elif mode == 13: # TV Season
videoType = 'season'
elif mode == 14: # TV Episode
videoType = 'episode'
elif mode == 100: # movies
videoType = 'movie'
else:
videoType = video_type
season = ''
episode = ''
if season_num:
season = season_num
if episode_num:
episode = episode_num
#handle adding meta
if meta == False:
liz = xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
liz.setInfo(type="Video", infoLabels={"Title": name})
else:
#check covers installed
covers_url = ''
if mode == 12:
#check tv posters vs banners setting
tv_posters = addon.get_setting('tv-posters')
if tv_posters == 'true':
#if meta_install['tv_covers'] == 'true':
covers_url = meta['cover_url']
else:
#if meta_install['tv_banners'] == 'true':
covers_url = meta['banner_url']
else:
#if meta_install['movie_covers'] == 'true':
covers_url = meta['cover_url']
#Set XBMC list item
liz = xbmcgui.ListItem(name, iconImage=covers_url, thumbnailImage=covers_url)
liz.setInfo(type="Video", infoLabels=meta)
#Set fanart/backdrop setting variables
movie_fanart = addon.get_setting('movie-fanart')
tv_fanart = addon.get_setting('tv-fanart')
# mark as watched or unwatched
addWatched = False
if mode == 12: # TV series
if int(meta['episode']) > 0:
episodes_unwatched = str(int(meta['episode']) - meta['playcount'])
liz.setProperty('UnWatchedEpisodes', episodes_unwatched)
liz.setProperty('WatchedEpisodes', str(meta['playcount']))
addWatched = True
#if tv_fanart == 'true' and tv_fanart_installed == 'true':
if tv_fanart == 'true':
liz.setProperty('fanart_image', meta['backdrop_url'])
contextMenuItems.append(('Show Information', 'XBMC.Action(Info)'))
if favourite:
next_aired = str2bool(addon.get_setting('next-aired'))
if next_aired:
contextMenuItems.append(('Show Next Aired', 'RunScript(%s)' % os.path.join(icepath, 'resources/script.tv.show.next.aired/default.py')))
elif mode == 13: # TV Season
addWatched = True
#if tv_fanart == 'true' and tv_fanart_installed == 'true':
if tv_fanart == 'true':
liz.setProperty('fanart_image', meta['backdrop_url'])
season = meta['season']
contextMenuItems.append(('Refresh Info', 'XBMC.RunPlugin(%s?mode=998&name=%s&url=%s&imdbnum=%s&dirmode=%s&videoType=%s&season=%s)' % (sys.argv[0], sysname, sysurl, urllib.quote_plus(str(imdb)), dirmode, videoType, season)))
elif mode == 14: # TV Episode
addWatched = True
if tv_fanart == 'true':
liz.setProperty('fanart_image', meta['backdrop_url'])
season = meta['season']
episode = meta['episode']
if not queueList and not recentWatched:
contextMenuItems.append(('Add to Queue List', 'XBMC.RunPlugin(%s?mode=add_queue&name=%s&url=%s&imdbnum=%s&dirmode=%s&videoType=%s&season=%s&episode=%s)' % (sys.argv[0], sysname, sysurl, urllib.quote_plus(str(imdb)), dirmode, videoType, season, episode)))
contextMenuItems.append(('Episode Information', 'XBMC.Action(Info)'))
contextMenuItems.append(('Refresh Info', 'XBMC.RunPlugin(%s?mode=997&name=%s&url=%s&imdbnum=%s&dirmode=%s&videoType=%s&season=%s&episode=%s)' % (sys.argv[0], sysname, sysurl, urllib.quote_plus(str(imdb)), dirmode, videoType, season, episode)))
elif mode == 100: # movies
addWatched = True
if movie_fanart == 'true':
liz.setProperty('fanart_image', meta['backdrop_url'])
if not queueList and not recentWatched:
contextMenuItems.append(('Add to Queue List', 'XBMC.RunPlugin(%s?mode=add_queue&name=%s&url=%s&imdbnum=%s&dirmode=%s&videoType=%s)' % (sys.argv[0], sysname, sysurl, urllib.quote_plus(str(imdb)), dirmode, videoType)))
contextMenuItems.append(('Movie Information', 'XBMC.Action(Info)'))
contextMenuItems.append(('Search for Similar', 'XBMC.RunPlugin(%s?mode=991&name=%s&url=%s&tmdbnum=%s&dirmode=%s&videoType=%s)' % (sys.argv[0], sysname, sysurl, urllib.quote_plus(str(meta['tmdb_id'])), dirmode, videoType)))
#Add Refresh & Trailer Search context menu
if searchMode==False:
if mode in (12, 100):
contextMenuItems.append(('Refresh Info', 'XBMC.RunPlugin(%s?mode=999&name=%s&url=%s&imdbnum=%s&dirmode=%s&videoType=%s)' % (sys.argv[0], sysname, sysurl, urllib.quote_plus(str(imdb)), dirmode, videoType)))
contextMenuItems.append(('Search for trailer',
'XBMC.RunPlugin(%s?mode=996&name=%s&url=%s&dirmode=%s&imdbnum=%s)'
% (sys.argv[0], sysname, sysurl, dirmode, urllib.quote_plus(str(imdb))) ))
#Add Watch/Unwatch context menu
if addWatched and not disablewatch:
if meta['overlay'] == 6:
watchedMenu='Mark as Watched'
else:
watchedMenu='Mark as Unwatched'
if searchMode==False:
contextMenuItems.append((watchedMenu, 'XBMC.RunPlugin(%s?mode=990&name=%s&url=%s&imdbnum=%s&videoType=%s&season=%s&episode=%s)'
% (sys.argv[0], sysname, sysurl, urllib.quote_plus(str(imdb)), videoType, season, episode)))
# add/delete favourite
if disablefav is False: # disable fav is necessary for the scrapes in the homepage category.
if delfromfav is True:
#settings for when in the Favourites folder
contextMenuItems.append(('Delete from Ice Favourites', 'XBMC.RunPlugin(%s?mode=111&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
else:
#if directory is an tv show or movie NOT and episode
if mode == 100 or mode == 12:
if imdb is not False:
sysimdb = urllib.quote_plus(str(imdb))
else:
#if no imdb number, it will have no metadata in Favourites
sysimdb = urllib.quote_plus('nothing')
#if searchMode==False:
contextMenuItems.append(('Add to Ice Favourites', 'XBMC.RunPlugin(%s?mode=110&name=%s&url=%s&imdbnum=%s&videoType=%s)' % (sys.argv[0], sysname, sysurl, sysimdb, videoType)))
if recentWatched:
contextMenuItems.append(('Delete from Watched List', 'XBMC.RunPlugin(%s?mode=remove_watched&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
if queueList:
contextMenuItems.append(('Delete from Queue List', 'XBMC.RunPlugin(%s?mode=remove_queue&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
if contextMenuItems:
liz.addContextMenuItems(contextMenuItems, replaceItems=True)
if mode == 14:
if check_episode(name):
episode_info = re.search('([0-9]+)x([0-9]+)', name)
season = int(episode_info.group(1))
episode = int(episode_info.group(2))
mode = 100
if mode in (12, 13, 100, 101):
u = sys.argv[0] + "?url=" + sysurl + "&mode=" + str(mode) + "&name=" + sysname + "&imdbnum=" + urllib.quote_plus(str(imdb)) + "&videoType=" + videoType + "&season=" + str(season) + "&episode=" + str(episode)
else:
u = sys.argv[0] + "?url=" + sysurl + "&mode=" + str(mode) + "&name=" + sysname
ok = True
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True, totalItems=totalItems)
return ok
#VANILLA ADDDIR (kept for reference)
def VaddDir(name, url, mode, iconimage, is_folder=False):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=is_folder)
return ok
def setView(content, viewType):
# set content type so library shows more views and info
if content:
xbmcplugin.setContent(int(sys.argv[1]), content)
if addon.get_setting('auto-view') == 'true':
xbmc.executebuiltin("Container.SetViewMode(%s)" % addon.get_setting(viewType) )
# set sort methods - probably we don't need all of them
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_UNSORTED )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_LABEL )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_RATING )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_DATE )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_PROGRAM_COUNT )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_RUNTIME )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_GENRE )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_MPAA_RATING )
def cleanUnicode(string):
try:
string = string.replace("'","").replace(unicode(u'\u201c'), '"').replace(unicode(u'\u201d'), '"').replace(unicode(u'\u2019'),'').replace(unicode(u'\u2026'),'...').replace(unicode(u'\u2018'),'').replace(unicode(u'\u2013'),'-')
return string
except:
return string
def ADD_ITEM(metaget, meta_installed, imdb_id,url,name,mode,num_of_eps=False, totalitems=0):
#clean name of unwanted stuff
name=CLEANUP(name)
if url.startswith('http://www.icefilms.info') == False:
url=iceurl+url
#append number of episodes to the display name, AFTER THE NAME HAS BEEN USED FOR META LOOKUP
if num_of_eps is not False:
name = name + ' ' + str(num_of_eps)
if meta_installed and meta_setting=='true':
#return the metadata dictionary
#we want a clean name with the year separated for proper meta search and storing
meta_name = CLEANUP_FOR_META(name)
r=re.search('(.+?) [(]([0-9]{4})[)]',meta_name)
if r:
meta_name = r.group(1)
year = r.group(2)
else:
year = ''
if mode==100:
#return the metadata dictionary
meta=metaget.get_meta('movie', meta_name, imdb_id=imdb_id, year=year)
elif mode==12:
#return the metadata dictionary
meta=metaget.get_meta('tvshow', meta_name, imdb_id=imdb_id)
addDir(name,url,mode,'',meta=meta,imdb='tt'+str(imdb_id),totalItems=totalitems, meta_install=meta_installed)
else:
#add directories without meta
if imdb_id == None:
            imdb_id = ''
else:
imdb_id = 'tt'+str(imdb_id)
addDir(name,url,mode,'',imdb=imdb_id,totalItems=totalitems)
def REFRESH(videoType, url,imdb_id,name,dirmode):
#refresh info for a Tvshow or movie
addon.log_debug('In Refresh ' + str(sys.argv[1]))
imdb_id = imdb_id.replace('tttt','')
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
if meta_installed:
name=CLEANUP(name)
r=re.search('(.+?) [(]([0-9]{4})[)]',name)
if r:
name = r.group(1)
year = r.group(2)
else:
year = ''
metaget.update_meta(videoType, name, imdb_id, year=year)
xbmc.executebuiltin("XBMC.Container.Refresh")
def episode_refresh(url, imdb_id, name, dirmode, season, episode):
#refresh info for an episode
addon.log_debug('In Episode Refresh ' + str(sys.argv[1]))
imdb_id = imdb_id.replace('tttt','')
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
if meta_installed:
name=CLEANUP(name)
metaget.update_episode_meta(name, imdb_id, season, episode)
xbmc.executebuiltin("XBMC.Container.Refresh")
def season_refresh(url, imdb_id, name, dirmode, season):
#refresh info for an episode
addon.log_debug('In Season Refresh ' + str(sys.argv[1]))
imdb_id = imdb_id.replace('tttt','')
if meta_setting=='true':
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
if meta_installed:
name=CLEANUP(name)
metaget.update_season(name, imdb_id, season)
xbmc.executebuiltin("XBMC.Container.Refresh")
def get_episode(season, episode, imdb_id, url, metaget, meta_installed, tmp_season_num=-1, tmp_episode_num=-1, totalitems=0):
# displays all episodes in the source it is passed.
imdb_id = imdb_id.replace('t','')
#add with metadata
if metaget:
#clean name of unwanted stuff
episode=CLEANUP(episode)
#Get tvshow name - don't want the year portion
showname=cache.get('tvshowname')
r=re.search('(.+?) [(][0-9]{4}[)]',showname)
if r:
showname = r.group(1)
#return the metadata dictionary
ep = re.search('[0-9]+x([0-9]+)', episode)
if ep:
tmp_episode_num = int(ep.group(1))
se = re.search('Season ([0-9]{1,2})', season)
if se:
tmp_season_num = int(se.group(1))
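        #e.g. an episode listed as '3x07' under 'Season 3' gives tmp_season_num=3, tmp_episode_num=7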
meta = {}
if meta_installed and tmp_episode_num >= 0:
showname = CLEANUP_FOR_META(showname)
meta=metaget.get_episode_meta(showname, imdb_id, tmp_season_num, tmp_episode_num)
if meta and meta_installed:
#add directories with meta
addDir(episode,iceurl+url,14,'',meta=meta,imdb='tt'+str(imdb_id),totalItems=totalitems, meta_install=meta_installed)
else:
#add directories without meta
addDir(episode,iceurl+url,14,'',imdb='tt'+str(imdb_id),totalItems=totalitems)
#add without metadata -- imdb is still passed for use with Add to Favourites
else:
episode=CLEANUP(episode)
addDir(episode,iceurl+url,14,'',imdb='tt'+str(imdb_id),totalItems=totalitems)
def find_meta_for_search_results(results, mode, search=''):
#initialise meta class before loop
metaget=metahandlers.MetaData()
meta_installed = metaget.check_meta_installed(addon_id)
if mode == 100:
for res in results:
name=res.title.encode('utf8')
name=CLEANSEARCH(name)
url=res.url.encode('utf8')
url=re.sub('&','&',url)
if check_episode(name):
mode = 14
else:
mode = 100
if meta_installed and meta_setting=='true':
meta = check_video_meta(name, metaget)
addDir(name,url,mode,'',meta=meta,imdb=meta['imdb_id'],searchMode=True, meta_install=meta_installed)
else:
addDir(name,url,mode,'',searchMode=True)
elif mode == 12:
for myurl,interim,name in results:
if len(interim) < 180:
name=CLEANSEARCH(name)
hasnameintitle=re.search(search,name,re.IGNORECASE)
if hasnameintitle:
myurl='http://www.icefilms.info/tv/series'+myurl
myurl=re.sub('&','',myurl)
if myurl.startswith('http://www.icefilms.info/tv/series'):
if meta_installed==True and meta_setting=='true':
meta = metaget.get_meta('tvshow',name)
addDir(name,myurl,12,'',meta=meta,imdb=meta['imdb_id'],searchMode=True)
else:
addDir(name,myurl,12,'',searchMode=True)
else:
addDir(name,myurl,12,'',searchMode=True)
def SearchGoogle(search):
gs = GoogleSearch(''+search+' site:http://www.youtube.com ')
gs.results_per_page = 25
gs.page = 0
try:
results = gs.get_results()
except Exception, e:
addon.log_error('***** Error: %s' % e)
Notify('big','Google Search','Error encountered searching.','')
return None
return results
def SearchForTrailer(search, imdb_id, type, manual=False):
search = search.replace(' [COLOR red]*HD*[/COLOR]', '')
res_name = []
res_url = []
    res_name.append('Manually enter search...')
if manual:
results = SearchGoogle(search)
for res in results:
if res.url.encode('utf8').startswith('http://www.youtube.com/watch'):
res_name.append(res.title.encode('utf8'))
res_url.append(res.url.encode('utf8'))
else:
results = SearchGoogle(search+' official trailer')
for res in results:
if res.url.encode('utf8').startswith('http://www.youtube.com/watch'):
res_name.append(res.title.encode('utf8'))
res_url.append(res.url.encode('utf8'))
results = SearchGoogle(search[:(len(search)-7)]+' official trailer')
for res in results:
if res.url.encode('utf8').startswith('http://www.youtube.com/watch') and res.url.encode('utf8') not in res_url:
res_name.append(res.title.encode('utf8'))
res_url.append(res.url.encode('utf8'))
dialog = xbmcgui.Dialog()
ret = dialog.select(search + ' trailer search',res_name)
# Manual search for trailer
if ret == 0:
if manual:
default = search
title = 'Manual Search for '+search
else:
default = search+' official trailer'
title = 'Manual Trailer Search for '+search
keyboard = xbmc.Keyboard(default, title)
#keyboard.setHiddenInput(hidden)
keyboard.doModal()
if keyboard.isConfirmed():
result = keyboard.getText()
SearchForTrailer(result, imdb_id, type, manual=True)
# Found trailers
elif ret > 0:
        #res_url is offset by one relative to res_name because res_name[0] is the manual-search entry
        trailer_url = res_url[ret - 1]
xbmc.executebuiltin(
"PlayMedia(plugin://plugin.video.youtube/?action=play_video&videoid=%s&quality=720p)"
% str(trailer_url)[str(trailer_url).rfind("v=")+2:] )
metaget=metahandlers.MetaData()
if type=='100':
media_type='movie'
elif type=='12':
media_type='tvshow'
metaget.update_trailer(media_type, imdb_id, trailer_url)
xbmc.executebuiltin("XBMC.Container.Refresh")
else:
res_name.append('Nothing Found. Thanks!!!')
def ChangeWatched(imdb_id, videoType, name, season, episode, year='', watched='', refresh=False):
metaget=metahandlers.MetaData()
metaget.change_watched(videoType, name, imdb_id, season=season, episode=episode, year=year, watched=watched)
if refresh:
xbmc.executebuiltin("XBMC.Container.Refresh")
def SimilarMovies(tmdb_id):
metaget=metahandlers.MetaData()
movie_list = metaget.similar_movies(tmdb_id)
name_list = []
filtered_movie_list = []
if movie_list:
for movie in movie_list:
if movie['id'] != None:
filtered_movie_list.append(movie)
name_list.append(movie['title'])
dialog = xbmcgui.Dialog()
index = dialog.select('Select a movie to search in Icefilms', name_list)
if index > -1:
xbmc.executebuiltin("XBMC.Container.Update(%s?mode=555&url=%s&search=%s&nextPage=0)" % (sys.argv[0], iceurl, name_list[index]))
def addLocal(name,filename, listitem=None):
    #use the listitem passed in (which may carry metadata); otherwise build a basic one
    if listitem is None:
        liz=xbmcgui.ListItem(name)
        liz.setInfo( type="Video", infoLabels={ "Title": name } )
    else:
        liz = listitem
    ok=True
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=filename,listitem=liz,isFolder=False)
return ok
def addDownloadControls(name,localFilePath, listitem=None):
#encode name
sysname = urllib.quote_plus(name)
statusUrl = sys.argv[0] + "?mode=207&name=" + sysname
cancelUrl = sys.argv[0] + "?&mode=208&name=" + sysname
ok = True
#add Download info
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=statusUrl,listitem=xbmcgui.ListItem("Download Info"),isFolder=False)
addon.log_debug('Ok: %s' % ok)
#add Cancel Download
ok = ok and xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=cancelUrl,listitem=xbmcgui.ListItem("Cancel Download"),isFolder=False)
addon.log_debug('Ok: %s' % ok)
#add Play File
ok = ok and addLocal("Play Downloading " + name, localFilePath, listitem)
addon.log_debug('Ok: %s' % ok)
return ok
def ShowDownloadInfo(name):
if not xbmcvfs.exists(os.path.join(downloadPath, 'Downloading')):
Notify('big','Download Inactive!','Download is not active','')
else:
save(os.path.join(downloadPath, 'ShowDLInfo'),'ShowDLInfo')
return True
def CancelDownload(name, video_seek=False):
if not xbmcvfs.exists(os.path.join(downloadPath, 'Downloading')):
if not video_seek:
Notify('big','Download Inactive!','Download is not active','')
else:
save(os.path.join(downloadPath, 'Cancel'),'Cancel')
return True
def get_default_action():
action_setting = addon.get_setting('play-action')
addon.log_debug("action_setting =" + action_setting)
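    #map the 'play-action' setting onto the dispatch modes used further down:
    #200 = stream, 201 = download, 206 = download and watch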
if action_setting == "1":
return 201
elif action_setting == "2":
return 206
#default is stream
return 200
def show_addon_help():
# Import PyXBMCt module.
import pyxbmct.addonwindow as pyxbmct
try:
common_addon_version = 'Unknown'
from addon import common
common_addon_version = common.common.addon_version
except Exception, e:
addon.log_debug('Failed to import addon.common: %s' % e)
pass
try:
axel_addon_version = 'Unknown'
from axel import axelcommon
axel_addon_version = axelcommon.addon_version
except Exception, e:
addon.log_debug('Failed to import axelcommon: %s' % e)
pass
# Create a window instance.
window = pyxbmct.AddonDialogWindow('Icefilms XBMC Addon Help')
# Set the window width, height, rows, columns.
window.setGeometry(850, 600, 12, 8)
# Icefilms logo
image = pyxbmct.Image(addon.get_icon())
window.placeControl(image, 0, 0, rowspan=3, columnspan=2)
# Addon current information
textBox = pyxbmct.TextBox(textColor='0xFFFFFFFF')
window.placeControl(textBox, 0, 2, columnspan=5, rowspan=2)
textBox.setText('[B]Author:[/B] %s\n[B]Current version:[/B] %s\n[B]Support:[/B] www.tvaddons.ag.com' % (addon.get_author(), addon.get_version()))
#Installed dependencies
textBox = pyxbmct.TextBox(textColor='0xFFFFFFFF')
window.placeControl(textBox, 3, 0, columnspan=7, rowspan=3)
textBox.setText('[B]Installed Dependencies:[/B]\n [B]Metahandlers:[/B] %s \n [B]Common addon methods:[/B] %s \n [B]Axel Downloader:[/B] %s' % (metahandler_version, common_addon_version, axel_addon_version))
# Folder locations
label = pyxbmct.Label('[B]Installed location:[/B] \n[B]Data Location:[/B]')
window.placeControl(label, 6, 0, columnspan=2)
fadeLabel = pyxbmct.FadeLabel(textColor='0xFFFFFFFF')
window.placeControl(fadeLabel, 6, 2, columnspan=6)
fadeLabel.addLabel('%s\n%s' % (addon.get_path(), addon.get_profile()))
#Addon description
textBox = pyxbmct.TextBox(textColor='0xFFFFFFFF')
window.placeControl(textBox, 7, 0, columnspan=7, rowspan=4)
textBox.setText(addon.get_description())
# Create a button.
button = pyxbmct.Button('Close')
# Place the button on the window grid.
window.placeControl(button, 11, 3, columnspan=2)
# Set initial focus on the button.
window.setFocus(button)
# Connect the button to a function.
window.connect(button, window.close)
# Connect a key action to a function.
window.connect(pyxbmct.ACTION_NAV_BACK, window.close)
# Show the created window.
window.doModal()
def flush_cache():
dlg = xbmcgui.Dialog()
ln1 = 'Are you sure you want to '
ln2 = 'delete the url cache?'
ln3 = 'This will slow things down until rebuilt'
yes = 'Keep'
no = 'Delete'
if dlg.yesno('Flush web cache', ln1, ln2, ln3, yes, no):
db_connection.flush_cache()
def reset_db():
if db_connection.reset_db():
message='DB Reset Successful'
else:
message='Reset only allowed on SQLite DBs'
Notify('small','Icefilms', message,'')
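#Main dispatch: 'mode' (together with url, name, imdbnum, season_num, episode_num, etc.) comes from
#the plugin URL parameters handled earlier in the file; each branch below routes the request to the
#matching listing, playback, download or maintenance routine.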
if mode=='main': #or url==None or len(url)<1:
CATEGORIES()
elif mode=='991':
addon.log_debug("Mode 991 ******* dirmode is " + str(dirmode) + " ************* url is -> " + url)
SimilarMovies(tmdbnum)
elif mode=='999':
addon.log_debug( "Mode 999 ******* dirmode is " + str(dirmode) + " ************* url is -> " + url)
REFRESH(video_type, url,imdbnum,name,dirmode)
elif mode=='998':
addon.log_debug( "Mode 998 (season meta refresh) ******* dirmode is " + str(dirmode) + " ************* url is -> "+url)
season_refresh(url,imdbnum,name,dirmode,season_num)
elif mode=='997':
addon.log_debug( "Mode 997 (episode meta refresh) ******* dirmode is " + str(dirmode) + " ************* url is -> "+url)
episode_refresh(url,imdbnum,name,dirmode,season_num,episode_num)
elif mode=='996':
addon.log_debug( "Mode 996 (trailer search) ******* name is " + str(name) + " ************* url is -> "+url)
SearchForTrailer(name, imdbnum, dirmode)
elif mode=='990':
addon.log_debug( "Mode 990 (Change watched value) ******* name is " + str(name) + " ************* season is -> '"+season_num+"'" + " ************* episode is -> '"+episode_num+"'")
ChangeWatched(imdbnum, video_type, name, season_num, episode_num, refresh=True)
elif mode=='addon_help':
show_addon_help()
elif mode=='flush_cache':
flush_cache()
elif mode=='reset_rd':
rd = debridroutines.RealDebrid()
rd.clear_client()
Notify('small','Icefilms', 'Successfully reset Real-Debrid authorization','')
elif mode=='reset_db':
reset_db()
elif mode=='clear_watched':
clear_watched()
elif mode=='clear_tv_watched':
clear_watched(VideoType_TV)
elif mode=='clear_movie_watched':
clear_watched(VideoType_Movies)
elif mode=='clear_episode_watched':
clear_watched(VideoType_Episode)
elif mode=='remove_watched':
remove_watched()
elif mode=='clear_queue':
clear_queue()
elif mode=='clear_tv_queue':
clear_queue(VideoType_TV)
elif mode=='clear_movie_queue':
clear_queue(VideoType_Movies)
elif mode=='clear_episode_queue':
clear_queue(VideoType_Episode)
elif mode=='remove_queue':
remove_queue()
elif mode=='add_queue':
add_queue()
elif mode=='50':
TVCATEGORIES(url)
elif mode=='51':
MOVIECATEGORIES(url)
elif mode=='52':
MUSICCATEGORIES(url)
elif mode=='53':
STANDUPCATEGORIES(url)
elif mode=='54':
OTHERCATEGORIES(url)
elif mode=='55':
SEARCH(url)
elif mode=='57':
FAVOURITES(url)
elif mode=='58':
addon.log_debug( "Metahandler Settings")
import metahandler
metahandler.display_settings()
callEndOfDirectory = False
elif mode=='570':
getFavourites(VideoType_TV)
elif mode=='571':
getFavourites(VideoType_Movies)
elif mode=='572':
get_recent_watched(VideoType_Movies)
elif mode=='573':
get_recent_watched(VideoType_Episode)
elif mode=='574':
get_queue_list(VideoType_Movies)
elif mode=='575':
get_queue_list(VideoType_Episode)
elif mode=='58':
CLEAR_FAVOURITES(url)
elif mode=='60':
RECENT(url)
elif mode=='61':
LATEST(url)
elif mode=='62':
WATCHINGNOW(url)
elif mode=='recent_watched':
recently_watched()
elif mode=='recent_watched_movie':
get_recent_watched(VideoType_Movies)
elif mode=='recent_watched_tv':
get_recent_watched(VideoType_TV)
elif mode=='recent_watched_episode':
get_recent_watched(VideoType_Episode)
elif mode=='watch_queue':
watch_queue()
elif mode=='watch_queue_movie':
get_queue_list(VideoType_Movies)
elif mode=='watch_queue_tv':
get_queue_list(VideoType_TV)
elif mode=='watch_queue_episode':
get_queue_list(VideoType_Episode)
elif mode=='63':
HD720pCat(url)
elif mode=='64':
Genres(url)
elif mode=='70':
Action(url)
elif mode=='71':
Animation(url)
elif mode=='72':
Comedy(url)
elif mode=='73':
Documentary(url)
elif mode=='74':
Drama(url)
elif mode=='75':
Family(url)
elif mode=='76':
Horror(url)
elif mode=='77':
Romance(url)
elif mode=='78':
SciFi(url)
elif mode=='79':
Thriller(url)
elif mode=='1':
MOVIEA2ZDirectories(url)
elif mode=='2':
MOVIEINDEX(url)
elif mode=='10':
TVA2ZDirectories(url)
elif mode=='11':
TVINDEX(url)
elif mode=='12':
TVSEASONS(url,imdbnum)
elif mode=='13':
TVEPISODES(name,url,None,imdbnum)
# Some tv shows will not be correctly identified, so to load their sources need to check on mode==14
elif mode=='14':
LOADMIRRORS(url)
elif mode=='100':
LOADMIRRORS(url)
elif mode=='110':
    # if you don't use the "url" and "name" params then you need to define the values along with the other params.
ADD_TO_FAVOURITES(name, url, imdbnum, video_type)
elif mode=='111':
DELETE_FROM_FAVOURITES(url)
elif mode=='200':
Stream_Source(name, stacked=stacked_parts)
elif mode=='201':
Stream_Source(name, download=True, stacked=stacked_parts)
#Download_Source(name,url)
elif mode=='202':
Stream_Source(name, stacked=stacked_parts, download_jdownloader=True)
elif mode=='203':
Kill_Streaming(name,url)
elif mode=='205':
PlayFile(name,url)
elif mode=='206':
Stream_Source(name, download_play=True, stacked=stacked_parts)
#Download_And_Play(name,url)
elif mode=='207':
ShowDownloadInfo(name)
elif mode=='208':
CancelDownload(name)
elif mode=='555':
addon.log_debug("Mode 555 (Get More...) ******* search string is " + search + " ************* nextPage is " + nextPage)
DoSearch(url, search, int(nextPage))
elif mode=='5555':
addon.log_debug("Mode 5555 (Predefined Search...) ******* search string is " + search)
KnownSearch(search, url)
elif mode=='666':
create_meta_pack()
if callEndOfDirectory and int(sys.argv[1]) <> -1:
xbmcplugin.endOfDirectory(int(sys.argv[1]))
#xbmcplugin.endOfDirectory(int(sys.argv[1]))
| azumimuo/family-xbmc-addon | plugin.video.icefilms/default.py | Python | gpl-2.0 | 145,284 | ["VisIt"] | 90651877ccec4b49ddf18b2ed24d48397dff05e6bf53632ae2a7e718ecda5a41 |
from django.test import TestCase
from splinter import Browser
class TestBaseViews(TestCase):
def setUp(self):
self.browser = Browser('chrome')
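        #these tests assume chromedriver is available on the PATH and that the site under test
        #is already being served at http://localhost:8000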
def tearDown(self):
self.browser.quit()
def test_home(self):
self.browser.visit('http://localhost:8000')
test_string = 'Hello, world!'
        self.assertTrue(self.browser.is_text_present(test_string))
def test_robots(self):
self.browser.visit('http://localhost:8000/robots.txt')
        self.assertTrue(self.browser.is_text_present('robotstxt'))
def test_humans(self):
self.browser.visit('http://localhost:8000/humans.txt')
        self.assertTrue(self.browser.is_text_present('humanstxt'))
| tosp/ProjectStud | base/test_views.py | Python | mit | 773 | ["VisIt"] | af0ac93b9e58006becd1e9ebe52aeafa70a3fe339ac5ef4975d1d82eb347cf39 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from pymatgen.core.composition import Composition
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Specie
from pymatgen.analysis.bond_valence import BVAnalyzer, calculate_bv_sum, \
calculate_bv_sum_unordered
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class BVAnalyzerTest(PymatgenTest):
def setUp(self):
self.analyzer = BVAnalyzer()
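    #BVAnalyzer assigns oxidation states from bond-valence sums; each 'ans' list below holds the
    #expected valence of every site, in the order the sites appear in the structure.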
def test_get_valence(self):
s = Structure.from_file(os.path.join(test_dir, "LiMn2O4.json"))
ans = [1, 1, 3, 3, 4, 4, -2, -2, -2, -2, -2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
s = self.get_structure("LiFePO4")
ans = [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2, -2, -2, -2,
- 2, -2, -2, -2, -2, -2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
s = self.get_structure("Li3V2(PO4)3")
ans = [1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 5, 5, 5, 5, 5, 5, -2, -2, -2, -2,
- 2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
- 2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
s = Structure.from_file(os.path.join(test_dir, "Li4Fe3Mn1(PO4)4.json"))
ans = [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2, -2, -2, -2,
- 2, -2, -2, -2, -2, -2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
s = self.get_structure("NaFePO4")
ans = [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2, -2, -2, -2,
- 2, -2, -2, -2, -2, -2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
def test_get_oxi_state_structure(self):
s = Structure.from_file(os.path.join(test_dir, "LiMn2O4.json"))
news = self.analyzer.get_oxi_state_decorated_structure(s)
self.assertIn(Specie("Mn", 3), news.composition.elements)
self.assertIn(Specie("Mn", 4), news.composition.elements)
class BondValenceSumTest(PymatgenTest):
def test_calculate_bv_sum(self):
s = Structure.from_file(os.path.join(test_dir, "LiMn2O4.json"))
neighbors = s.get_neighbors(s[0], 3.0)
bv_sum = calculate_bv_sum(s[0], neighbors)
self.assertAlmostEqual(bv_sum, 0.7723402182087497, places=5)
def test_calculate_bv_sum_unordered(self):
s = Structure.from_file(os.path.join(test_dir, "LiMn2O4.json"))
s[0].species = Composition("Li0.5Na0.5")
neighbors = s.get_neighbors(s[0], 3.0)
bv_sum = calculate_bv_sum_unordered(s[0], neighbors)
self.assertAlmostEqual(bv_sum, 1.5494662306918852, places=5)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| tschaume/pymatgen | pymatgen/analysis/tests/test_bond_valence.py | Python | mit | 2,988 | ["pymatgen"] | abb1fbe52f48451ec7b2ed25d8f395ab6d141d75c69b1d6cd2262948ae2e214d |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def offset_gaussian():
# Connect to a pre-existing cluster
insurance = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/insurance.csv"))
insurance["offset"] = insurance["Holders"].log()
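    #log(Holders) is used as an offset column, i.e. a per-row constant added to the model's
    #prediction, mirroring offset(log(Holders)) in the reference R gbm call quoted below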
gbm = H2OGradientBoostingEstimator(ntrees=600,
max_depth=1,
min_rows=1,
learn_rate=0.1,
distribution="gaussian")
gbm.train(x=range(3), y="Claims", training_frame=insurance, offset_column="offset")
predictions = gbm.predict(insurance)
# Comparison result generated from R's gbm:
# fit2 <- gbm(Claims ~ District + Group + Age+ offset(log(Holders)) , interaction.depth = 1,n.minobsinnode = 1,
# shrinkage = .1,bag.fraction = 1,train.fraction = 1,
# data = Insurance, distribution ="gaussian", n.trees = 600)
# pg = predict(fit2, newdata = Insurance, type = "response", n.trees=600)
# pr = pg - - log(Insurance$Holders)
assert abs(44.33016 - gbm._model_json['output']['init_f']) < 1e-5, "expected init_f to be {0}, but got {1}". \
format(44.33016, gbm._model_json['output']['init_f'])
assert abs(1491.135 - gbm.mse()) < 1e-2, "expected mse to be {0}, but got {1}".format(1491.135, gbm.mse())
assert abs(49.23438 - predictions.mean()[0]) < 1e-2, "expected prediction mean to be {0}, but got {1}". \
format(49.23438, predictions.mean()[0])
assert abs(-45.5720659304 - predictions.min()) < 1e-2, "expected prediction min to be {0}, but got {1}". \
format(-45.5720659304, predictions.min())
assert abs(207.387 - predictions.max()) < 1e-2, "expected prediction max to be {0}, but got {1}". \
format(207.387, predictions.max())
if __name__ == "__main__":
pyunit_utils.standalone_test(offset_gaussian)
else:
offset_gaussian()
| madmax983/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_offset_gaussian_gbm.py | Python | apache-2.0 | 1,986 | ["Gaussian"] | 52cff547ae27fd4069344dee2d560e5beeb73ca9d08d66d6c3e8de76bb347ad3 |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import sys
import numpy as np
from ...core.parameterization.parameterized import Parameterized
from kernel_slice_operations import KernCallsViaSlicerMeta
from ...util.caching import Cache_this
from GPy.core.parameterization.observable_array import ObsAr
class Kern(Parameterized):
#===========================================================================
# This adds input slice support. The rather ugly code for slicing can be
# found in kernel_slice_operations
__metaclass__ = KernCallsViaSlicerMeta
#===========================================================================
_support_GPU=False
def __init__(self, input_dim, active_dims, name, useGPU=False, *a, **kw):
"""
The base class for a kernel: a positive definite function
which forms of a covariance function (kernel).
input_dim:
is the number of dimensions to work on. Make sure to give the
tight dimensionality of inputs.
You most likely want this to be the integer telling the number of
input dimensions of the kernel.
If this is not an integer (!) we will work on the whole input matrix X,
and not check whether dimensions match or not (!).
active_dims:
is the active_dimensions of inputs X we will work on.
All kernels will get sliced Xes as inputs, if active_dims is not None
Only positive integers are allowed in active_dims!
if active_dims is None, slicing is switched off and all X will be passed through as given.
:param int input_dim: the number of input dimensions to the function
:param array-like|None active_dims: list of indices on which dimensions this kernel works on, or none if no slicing
Do not instantiate.
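        Example of the active_dims convention (assuming a concrete subclass such as GPy.kern.RBF):
        k = GPy.kern.RBF(input_dim=2, active_dims=[0, 3]) will only ever be handed
        columns 0 and 3 of any X passed to it, because of the slicing performed here.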
"""
super(Kern, self).__init__(name=name, *a, **kw)
self.input_dim = int(input_dim)
if active_dims is None:
active_dims = np.arange(input_dim)
self.active_dims = np.atleast_1d(active_dims).astype(int)
assert self.active_dims.size == self.input_dim, "input_dim={} does not match len(active_dim)={}, active_dims={}".format(self.input_dim, self.active_dims.size, self.active_dims)
self._sliced_X = 0
self.useGPU = self._support_GPU and useGPU
self._return_psi2_n_flag = ObsAr(np.zeros(1)).astype(bool)
@property
def return_psi2_n(self):
"""
Flag whether to pass back psi2 as NxMxM or MxM, by summing out N.
"""
return self._return_psi2_n_flag[0]
@return_psi2_n.setter
def return_psi2_n(self, val):
def visit(self):
if isinstance(self, Kern):
self._return_psi2_n_flag[0]=val
self.traverse(visit)
@Cache_this(limit=20)
def _slice_X(self, X):
return X[:, self.active_dims]
def K(self, X, X2):
"""
Compute the kernel function.
:param X: the first set of inputs to the kernel
:param X2: (optional) the second set of arguments to the kernel. If X2
            is None, this is passed through to the 'part' object, which
            handles this as X2 == X.
"""
raise NotImplementedError
def Kdiag(self, X):
raise NotImplementedError
def psi0(self, Z, variational_posterior):
raise NotImplementedError
def psi1(self, Z, variational_posterior):
raise NotImplementedError
def psi2(self, Z, variational_posterior):
raise NotImplementedError
def gradients_X(self, dL_dK, X, X2):
raise NotImplementedError
def gradients_X_diag(self, dL_dKdiag, X):
raise NotImplementedError
def update_gradients_diag(self, dL_dKdiag, X):
""" update the gradients of all parameters when using only the diagonal elements of the covariance matrix"""
raise NotImplementedError
def update_gradients_full(self, dL_dK, X, X2):
"""Set the gradients of all parameters when doing full (N) inference."""
raise NotImplementedError
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
"""
Set the gradients of all parameters when doing inference with
uncertain inputs, using expectations of the kernel.
        The essential maths is
dL_d{theta_i} = dL_dpsi0 * dpsi0_d{theta_i} +
dL_dpsi1 * dpsi1_d{theta_i} +
dL_dpsi2 * dpsi2_d{theta_i}
"""
raise NotImplementedError
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
"""
Returns the derivative of the objective wrt Z, using the chain rule
through the expectation variables.
"""
raise NotImplementedError
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
"""
Compute the gradients wrt the parameters of the variational
distruibution q(X), chain-ruling via the expectations of the kernel
"""
raise NotImplementedError
def plot(self, x=None, fignum=None, ax=None, title=None, plot_limits=None, resolution=None, **mpl_kwargs):
"""
plot this kernel.
:param x: the value to use for the other kernel argument (kernels are a function of two variables!)
:param fignum: figure number of the plot
:param ax: matplotlib axis to plot on
:param title: the matplotlib title
:param plot_limits: the range over which to plot the kernel
:resolution: the resolution of the lines used in plotting
:mpl_kwargs avalid keyword arguments to pass through to matplotlib (e.g. lw=7)
"""
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ...plotting.matplot_dep import kernel_plots
kernel_plots.plot(self, x, fignum, ax, title, plot_limits, resolution, **mpl_kwargs)
def plot_ARD(self, *args, **kw):
"""
See :class:`~GPy.plotting.matplot_dep.kernel_plots`
"""
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ...plotting.matplot_dep import kernel_plots
return kernel_plots.plot_ARD(self,*args,**kw)
def input_sensitivity(self, summarize=True):
"""
Returns the sensitivity for each dimension of this kernel.
"""
return np.zeros(self.input_dim)
def __add__(self, other):
""" Overloading of the '+' operator. for more control, see self.add """
return self.add(other)
def __iadd__(self, other):
return self.add(other)
def add(self, other, name='add'):
"""
Add another kernel to this one.
:param other: the other kernel to be added
:type other: GPy.kern
"""
assert isinstance(other, Kern), "only kernels can be added to kernels..."
from add import Add
return Add([self, other], name=name)
def __mul__(self, other):
""" Here we overload the '*' operator. See self.prod for more information"""
return self.prod(other)
def __imul__(self, other):
""" Here we overload the '*' operator. See self.prod for more information"""
return self.prod(other)
def __pow__(self, other):
"""
Shortcut for tensor `prod`.
"""
assert np.all(self.active_dims == range(self.input_dim)), "Can only use kernels, which have their input_dims defined from 0"
assert np.all(other.active_dims == range(other.input_dim)), "Can only use kernels, which have their input_dims defined from 0"
other.active_dims += self.input_dim
return self.prod(other)
def prod(self, other, name='mul'):
"""
Multiply two kernels (either on the same space, or on the tensor
product of the input space).
:param other: the other kernel to be added
:type other: GPy.kern
"""
assert isinstance(other, Kern), "only kernels can be multiplied to kernels..."
from prod import Prod
#kernels = []
#if isinstance(self, Prod): kernels.extend(self.parameters)
#else: kernels.append(self)
#if isinstance(other, Prod): kernels.extend(other.parameters)
#else: kernels.append(other)
return Prod([self, other], name)
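    # Usage sketch of the operator overloads above (assumes two concrete kernels k1 and k2,
    # e.g. GPy.kern.RBF instances):
    #   k_sum  = k1 + k2    # dispatches to add()  -> Add([k1, k2])
    #   k_prod = k1 * k2    # dispatches to prod() -> Prod([k1, k2])
    #   k_tens = k1 ** k2   # tensor product: k2.active_dims is shifted past k1.input_dim first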
def _check_input_dim(self, X):
assert X.shape[1] == self.input_dim, "{} did not specify active_dims and X has wrong shape: X_dim={}, whereas input_dim={}".format(self.name, X.shape[1], self.input_dim)
def _check_active_dims(self, X):
assert X.shape[1] >= len(self.active_dims), "At least {} dimensional X needed, X.shape={!s}".format(len(self.active_dims), X.shape)
class CombinationKernel(Kern):
"""
Abstract super class for combination kernels.
A combination kernel combines (a list of) kernels and works on those.
Examples are the HierarchicalKernel or Add and Prod kernels.
"""
def __init__(self, kernels, name, extra_dims=[]):
"""
Abstract super class for combination kernels.
A combination kernel combines (a list of) kernels and works on those.
Examples are the HierarchicalKernel or Add and Prod kernels.
:param list kernels: List of kernels to combine (can be only one element)
:param str name: name of the combination kernel
:param array-like extra_dims: if needed extra dimensions for the combination kernel to work on
"""
assert all([isinstance(k, Kern) for k in kernels])
extra_dims = np.array(extra_dims, dtype=int)
input_dim, active_dims = self.get_input_dim_active_dims(kernels, extra_dims)
# initialize the kernel with the full input_dim
super(CombinationKernel, self).__init__(input_dim, active_dims, name)
self.extra_dims = extra_dims
self.link_parameters(*kernels)
@property
def parts(self):
return self.parameters
def get_input_dim_active_dims(self, kernels, extra_dims = None):
#active_dims = reduce(np.union1d, (np.r_[x.active_dims] for x in kernels), np.array([], dtype=int))
#active_dims = np.array(np.concatenate((active_dims, extra_dims if extra_dims is not None else [])), dtype=int)
input_dim = reduce(max, (k.active_dims.max() for k in kernels)) + 1
if extra_dims is not None:
input_dim += extra_dims.size
active_dims = np.arange(input_dim)
return input_dim, active_dims
def input_sensitivity(self, summarize=True):
"""
        If summarize is true, we want to get the summarized view of the sensitivities,
otherwise put everything into an array with shape (#kernels, input_dim)
in the order of appearance of the kernels in the parameterized object.
"""
raise NotImplementedError("Choose the kernel you want to get the sensitivity for. You need to override the default behaviour for getting the input sensitivity to be able to get the input sensitivity. For sum kernel it is the sum of all sensitivities, TODO: product kernel? Other kernels?, also TODO: shall we return all the sensitivities here in the combination kernel? So we can combine them however we want? This could lead to just plot all the sensitivities here...")
def _check_active_dims(self, X):
return
def _check_input_dim(self, X):
# As combination kernels cannot always know, what their inner kernels have as input dims, the check will be done inside them, respectively
return
| strongh/GPy | GPy/kern/_src/kern.py | Python | bsd-3-clause | 11,880 | ["VisIt"] | 0b311f5a7532795c41378d4563913bb06f5a95ca7abeed08a789c8a72b785b35 |
__author__ = "mfreer"
__date__ = "2011-09-15 17:09"
__version__ = "1.6"
__all__ = ["FileCore", "get_file_list"]
import glob
import logging
class FileCore(object):
"""
Abstract class which holds basic file access methods and attributes.
Designed to be subclassed by NetCDF, NASA Ames and basic text file
classes.
**Constructor Variables**
:param string filename: Optional -
Name of file to open.
:param char perms: Optional -
Permissions used to open file. Options are ``w`` for write (overwrites data in file),
``a`` and ``r+`` for append, and ``r`` for read. ``r`` is the default value
"""
def __init__(self, filename=None, perms='r', **kwargs):
"""
Initializes file instance.
:param string filename: Optional -
Name of file to open.
:param char perms: Optional -
Permissions used to open file. Options are ``w`` for write (overwrites data in file),
``a`` and ``r+`` for append, and ``r`` for read. ``r`` is the default value
"""
logging.debug('egads - input_core.py - FileCore - __init__ - filename ' + str(filename) +
', perms ' + perms + ', kwargs ' + str(kwargs))
self.f = None
self.filename = filename
self.perms = perms
for key, val in kwargs.iteritems():
setattr(self, key, val)
if filename is not None:
self._open_file(filename, perms)
def open(self, filename, perms=None):
"""
Opens file given filename.
:param string filename:
Name of file to open.
:param char perms: Optional -
Permissions used to open file. Options are ``w`` for write (overwrites data in file),
``a`` and ``r+`` for append, and ``r`` for read. ``r`` is the default value
"""
logging.debug('egads - input_core.py - FileCore - open - filename ' + str(filename) + ', perms ' + str(perms))
if perms is not None:
self.perms = perms
else:
perms = self.perms
self._open_file(filename, perms)
def close(self):
"""
Close opened file.
"""
logging.debug('egads - input_core.py - FileCore - close - filename ' + str(self.filename))
if self.f is not None:
self.f.close()
self.f = None
self.filename = None
def get_perms(self):
"""
Returns the current permissions on the file that is open. Returns None if
no file is currently open. Options are ``w`` for write (overwrites
data in file),``a`` and ``r+`` for append, and ``r`` for read.
"""
logging.debug('egads - input_core.py - FileCore - get_perms - perms ' + str(self.perms))
if self.f is not None:
return self.perms
else:
return
def get_filename(self):
"""
If file is open, returns the filename.
"""
logging.debug('egads - input_core.py - FileCore - get_filename - filename ' + str(self.filename))
return self.filename
logging.info('egads - input_core.py - FileCore has been loaded')
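# Minimal subclassing sketch (hypothetical TextFile class; FileCore never defines _open_file,
# so a concrete subclass has to supply it):
#   class TextFile(FileCore):
#       def _open_file(self, filename, perms):
#           self.f = open(filename, perms)
#   f = TextFile('data.txt', 'r')
#   print f.get_filename()
#   f.close()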
def get_file_list(path):
"""
Given path, returns a list of all files in that path. Wildcards are supported.
Example::
file_list = get_file_list('data/*.nc')
"""
logging.debug('egads - input_core.py - get_file_list - path ' + str(path))
return glob.glob(path)
| eufarn7sp/egads-eufar | egads/input/input_core.py | Python | bsd-3-clause | 3,537 | ["NetCDF"] | d12c47d7219bcc5dc0bf122b64b8b7ff5ff5183e2cf66cea3c950021e43260fd |
'''This file contains code for the unit:Number Theory. '''
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import ndtri
import pylab
'''Method that returns the value of the Gaussian, given an input array and the mean and standard deviation'''
def Normal(x, mu,sigma):
return np.exp(- (x-mu)**2/(2*sigma**2))/(sigma*np.sqrt(2*np.pi))
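#For a counting experiment the spread is sqrt(N): with N = 25 the one-sigma band is 25 +/- 5, and
#since ndtri(0.99) is about 2.326 the 1% and 99% points fall near 25 - 11.6 = 13.4 and
#25 + 11.6 = 36.6; the 250-event plot below uses the same logic with sqrt(250) ~ 15.8.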
plt.show()
'''Counting 25 events 40000 times'''
Events25 = np.random.rand(1000000) #generate 25*40000 = 1,000,000 random numbers
Counters25 = np.zeros(40000) #generate an array with 40000 entries all set to 0
for value in Events25:
Place = int(40000 * value) #Scale the random values to range between 0 to 40000
Counters25[Place] +=1 #Increment counts for the value as per the scaled value
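#1,000,000 uniform draws spread over 40,000 counters give 25 events per counter on average, so
#Counters25 approximates a Poisson(25) sample, which the Gaussian overlay (mean 25, sigma sqrt(25))
#in the plot below is compared against.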
####Plot- The result of counting 25 events 40000 times as well as the errors, one sigma, one percent, 99 percent
###See figure - Count 25 Events 40000 times
plt.figure("Count 25 Events 40000 times")
Numcounts25, binedges25, patches = plt.hist(Counters25, bins = 50, range = (0,50), color = "green", alpha = 0.5) #plot histogram with 50 bins. Store Number of counts/bin and bin edges
centers25 = 0.5*(binedges25[1:] + binedges25[:-1]) #Computing bin centers as means of the bin edge values
y25 = 40000 * Normal(centers25, 25, np.sqrt(25)) #Compute the y values(as per the gaussian function)
xbar25 = np.zeros(2)
ybar25 = np.zeros(2)
xbar25[0] = 25 - np.sqrt(25) #Compute the one sigma values as
xbar25[1] = 25 + np.sqrt(25) #mean +-error(on the mean value)
ybar25 = 40000*Normal(xbar25, 25, np.sqrt(25)) #Computing y values as per the gaussian function for the X values
plt.plot(xbar25, ybar25, color= "red", alpha = 1.0, lw =5) #plot the line joining the 2 one sigma points
plt.plot(centers25, y25, alpha = 1.0, color = "red", lw =5) #plot the gaussian function passing through the center of each bin
errors25 = np.sqrt(y25) #Compute the expected error on Y-values
plt.errorbar(centers25, y25, yerr = errors25, linestyle='None', linewidth = 3.0, markeredgewidth = 3.0, marker ='o', color = 'black', markersize= 5.0 ) #Plot the errors on Y values
prob1percent25 = 25 + np.sqrt(25) * ndtri(0.01) #compute the 1% point - x value
prob99percent25 = 25 + np.sqrt(25) * ndtri(0.99) #compute the 99% point - x value
y1percent25 = 40000*Normal(prob1percent25, 25, np.sqrt(25)) #compute the 1% point - y value
y99percent25 = 40000*Normal(prob99percent25, 25, np.sqrt(25)) #compute the 99% point - y value
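# Note (illustrative): scipy.special.ndtri is the inverse of the standard normal CDF,
# so ndtri(0.01) ~ -2.326 and ndtri(0.99) ~ +2.326; with mean 25 and sigma 5 the two
# points computed above are therefore roughly 13.4 and 36.6 counts.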
#Perform labelling operations for the plots
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-75,50), xy = (prob1percent25, y1percent25))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(30,50), xy = (prob99percent25, y99percent25))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (20,ybar25[0]), xytext = (-70,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (30,ybar25[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("25 Events Counted 40000 times", backgroundcolor = "white")
'''A similar experiment as above is performed with 250 events being performed 40000 times. Refer to the documentation of the above section.'''
Events250 = np.random.rand(10000000)
Counters250 = np.zeros(40000)
for value in Events250:
Place = int(40000 * value)
Counters250[Place] +=1
####Plot- The result of counting 250 events 40000 times as well as the errors, one sigma, one percent, 99 percent. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 40000 times
plt.figure("Count 250 Events 40000 times")
Numcounts250, binedges250, patches = plt.hist(Counters250, bins = 200, range = (150,350), color = "green", alpha = 0.5)
centers250 = 0.5*(binedges250[1:] + binedges250[:-1])
y250 = 40000 * Normal(centers250, 250, np.sqrt(250))
errors250 = np.sqrt(y250)
xbar250 = np.zeros(2)
ybar250 = np.zeros(2)
xbar250[0] = 250 - np.sqrt(250)
xbar250[1] = 250 + np.sqrt(250)
ybar250 = 40000*Normal(xbar250, 250, np.sqrt(250))
plt.plot(xbar250, ybar250, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250, y250, alpha = 1.0, color = "red", lw =5)
plt.errorbar(centers250, y250, yerr = errors250, linestyle='None', linewidth = 3.0, markeredgewidth = 3.0, marker ='o', color = 'black', markersize= 5.0 )
prob1percent250 = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250 = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250 = 40000*Normal(prob1percent250, 250, np.sqrt(250)) #compute the 1% point - y value (scaled by the 40000 repetitions)
y99percent250 = 40000*Normal(prob99percent250, 250, np.sqrt(250)) #compute the 99% point - y value (scaled by the 40000 repetitions)
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-75,50), xy = (prob1percent250, y1percent250))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(30,50), xy = (prob99percent250, y99percent250))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250[0],ybar250[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250[1],ybar250[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 40000 times", backgroundcolor = "white")
'''The above experiment is repeated with 250 events, each count repeated 400 times. It is performed with 2 different seeds for random numbers.'''
####First random set
Events250A = np.random.rand(100000)
Counters250A = np.zeros(400)
for value in Events250A:
Place = int(400 * value)
Counters250A[Place] +=1
####Plot- The result of counting 250 events 400 times as well as the errors, one sigma, one percent, 99 percent. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 400 times I
plt.figure("Count 250 Events 400 times I")
Numcounts250A, binedges250A, patches = plt.hist(Counters250A, bins = 100, range = (200,300), color = "green", alpha = 0.5)
centers250A = 0.5*(binedges250A[1:] + binedges250A[:-1])
y250A = 400 * Normal(centers250A, 250, np.sqrt(250))
xbar250A = np.zeros(2)
ybar250A = np.zeros(2)
xbar250A[0] = 250 - np.sqrt(250)
xbar250A[1] = 250 + np.sqrt(250)
ybar250A = 400*Normal(xbar250A, 250, np.sqrt(250))
plt.plot(xbar250A, ybar250A, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250A, y250A, alpha = 1.0, color = "red", lw =5)
prob1percent250A = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250A = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250A = 400*Normal(prob1percent250A, 250, np.sqrt(250))
y99percent250A = 400*Normal(prob99percent250A, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250A, y1percent250A))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250A, y99percent250A))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250A[0],ybar250A[0]), xytext = (-100,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250A[1],ybar250A[1]), xytext = (40,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 400 times. One Seed", backgroundcolor = "white")
### Second random set.
Events250A = np.random.rand(100000)
Counters250A = np.zeros(400)
for value in Events250A:
Place = int(400 * value)
Counters250A[Place] +=1
####Plot- The result of counting 250 events 400 times as well as the errors, one sigma, one percent, 99 percent with separate seed. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 400 times II
plt.figure("Count 250 Events 400 times II")
Numcounts250A, binedges250A, patches = plt.hist(Counters250A, bins = 100, range = (200,300), color = "green", alpha = 0.5)
centers250A = 0.5*(binedges250A[1:] + binedges250A[:-1])
y250A = 400 * Normal(centers250A, 250, np.sqrt(250))
xbar250A = np.zeros(2)
ybar250A = np.zeros(2)
xbar250A[0] = 250 - np.sqrt(250)
xbar250A[1] = 250 + np.sqrt(250)
ybar250A = 400*Normal(xbar250A, 250, np.sqrt(250))
plt.plot(xbar250A, ybar250A, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250A, y250A, alpha = 1.0, color = "red", lw =5)
prob1percent250A = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250A = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250A = 400*Normal(prob1percent250A, 250, np.sqrt(250))
y99percent250A = 400*Normal(prob99percent250A, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250A, y1percent250A))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250A, y99percent250A))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250A[0],ybar250A[0]), xytext = (-100,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250A[1],ybar250A[1]), xytext = (40,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 400 times. Another Seed", backgroundcolor = "white")
'''The above experiment is repeated with 250 events, each count repeated 400 times. It is performed with 2 different seeds for random numbers. The number of bins is decreased to 20.'''
###First set of random numbers
Events250C = np.random.rand(100000)
Counters250C = np.zeros(400)
for value in Events250C:
Place = int(400 * value)
Counters250C[Place] +=1
####Plot- The result of counting 250 events 400 times as well as the errors, one sigma, one percent, 99 percent. The number of bins is decreased to 20. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 400 times Larger Bins
plt.figure("Count 250 Events 400 times Larger Bins.")
Numcounts250C, binedges250C, patches = plt.hist(Counters250C, bins = 20, range = (200,300), color = "green", alpha = 0.5)
centers250C = 0.5*(binedges250C[1:] + binedges250C[:-1])
y250C = 2000 * Normal(centers250C, 250, np.sqrt(250))
xbar250C = np.zeros(2)
ybar250C = np.zeros(2)
xbar250C[0] = 250 - np.sqrt(250)
xbar250C[1] = 250 + np.sqrt(250)
ybar250C = 2000*Normal(xbar250C, 250, np.sqrt(250))
plt.plot(xbar250C, ybar250C, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250C, y250C, alpha = 1.0, color = "red", lw =5)
prob1percent250C = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250C = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250C = 2000*Normal(prob1percent250C, 250, np.sqrt(250))
y99percent250C = 2000*Normal(prob99percent250C, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250C, y1percent250C))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250C, y99percent250C))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250C[0],ybar250C[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250C[1],ybar250C[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 400 times. Larger Bins(5).", backgroundcolor = "white")
#second set of random numbers
Events250C = np.random.rand(100000)
Counters250C = np.zeros(400)
for value in Events250C:
Place = int(400 * value)
Counters250C[Place] +=1
####Plot- The result of counting 250 events 400 times as well as the errors, one sigma, one percent, 99 percent with separate seed. The number of bins is decreased to 20. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 400 times Larger Bins. Another Seed
plt.figure("Count 250 Events 400 times Larger Bins. Another Seed")
Numcounts250C, binedges250C, patches = plt.hist(Counters250C, bins = 20, range = (200,300), color = "green", alpha = 0.5)
centers250C = 0.5*(binedges250C[1:] + binedges250C[:-1])
y250C = 2000 * Normal(centers250C, 250, np.sqrt(250))
xbar250C = np.zeros(2)
ybar250C = np.zeros(2)
xbar250C[0] = 250 - np.sqrt(250)
xbar250C[1] = 250 + np.sqrt(250)
ybar250C = 2000*Normal(xbar250C, 250, np.sqrt(250))
plt.plot(xbar250C, ybar250C, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250C, y250C, alpha = 1.0, color = "red", lw =5)
prob1percent250C = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250C = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250C = 2000*Normal(prob1percent250C, 250, np.sqrt(250))
y99percent250C = 2000*Normal(prob99percent250C, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250C, y1percent250C))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250C, y99percent250C))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250C[0],ybar250C[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250C[1],ybar250C[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 400 times. Larger Bins(5). Another Seed", backgroundcolor = "white")
'''The above experiment is repeated with 250 events, each count repeated 4000 times. It is performed with 2 different seeds for random numbers. The number of bins is 100.'''
###Random set 1
Events250B = np.random.rand(1000000)
Counters250B = np.zeros(4000)
for value in Events250B:
Place = int(4000 * value)
Counters250B[Place] +=1
####Plot- The result of counting 250 events 4000 times as well as the errors, one sigma, one percent, 99 percent. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 4000 times
plt.figure("Count 250 Events 4000 times")
Numcounts250B, binedges250B, patches = plt.hist(Counters250B, bins = 100, range = (200,300), color = "green", alpha = 0.5)
centers250B = 0.5*(binedges250B[1:] + binedges250B[:-1])
y250B = 4000 * Normal(centers250B, 250, np.sqrt(250))
xbar250B = np.zeros(2)
ybar250B = np.zeros(2)
xbar250B[0] = 250 - np.sqrt(250)
xbar250B[1] = 250 + np.sqrt(250)
ybar250B = 4000*Normal(xbar250B, 250, np.sqrt(250))
plt.plot(xbar250B, ybar250B, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250B, y250B, alpha = 1.0, color = "red", lw =5)
prob1percent250B = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250B = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250B = 4000*Normal(prob1percent250B, 250, np.sqrt(250))
y99percent250B = 4000*Normal(prob99percent250B, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250B, y1percent250B))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250B, y99percent250B))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250B[0],ybar250B[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250B[1],ybar250B[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 4000 times.", backgroundcolor = "white")
###Second random set
Events250B = np.random.rand(1000000)
Counters250B = np.zeros(4000)
for value in Events250B:
Place = int(4000 * value)
Counters250B[Place] +=1
####Plot- The result of counting 250 events 4000 times as well as the errors, one sigma, one percent, 99 percent with separate seed. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 4000 times Another Seed
plt.figure("Count 250 Events 4000 times Another Seed")
Numcounts250B, binedges250B, patches = plt.hist(Counters250B, bins = 100, range = (200,300), color = "green", alpha = 0.5)
centers250B = 0.5*(binedges250B[1:] + binedges250B[:-1])
y250B = 4000 * Normal(centers250B, 250, np.sqrt(250))
xbar250B = np.zeros(2)
ybar250B = np.zeros(2)
xbar250B[0] = 250 - np.sqrt(250)
xbar250B[1] = 250 + np.sqrt(250)
ybar250B = 4000*Normal(xbar250B, 250, np.sqrt(250))
plt.plot(xbar250B, ybar250B, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250B, y250B, alpha = 1.0, color = "red", lw =5)
prob1percent250B = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250B = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250B = 4000*Normal(prob1percent250B, 250, np.sqrt(250))
y99percent250B = 4000*Normal(prob99percent250B, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250B, y1percent250B))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250B, y99percent250B))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250B[0],ybar250B[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250B[1],ybar250B[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 4000 times. Another Seed", backgroundcolor = "white")
#For Agg backend
pylab.show()
| cloudmesh/book | examples/physics/number-theory/higgs_classIII.py | Python | apache-2.0 | 19,448 | ["Gaussian"] | 760f92d21df1dda7f7691926be10d7f979e7712ae0ed9cce89f1ff31d95f1828 |
#!/usr/bin/python
########################################################################## RAD4SNPs:##############################################################################
# A set of Python scripts to select and validate independent SNPs markers from a list of read files #
##################################################################################################################################################################
# MAIN PROGRAM
# Authors: G.LASSALLE ([email protected]) & C.DELORD ([email protected])
# Last update: AUGUST 2017
#################### PRE-CONDITIONS
#- [-i] Working directory where to store results of the pipeline for the focal species X
#- [-d] exact name of MySQL database where denovo_map.pl Stacks data are available for the focal species X
#- [-i1] single-end reads (reads 1) for focal species X duplicate 1
#- [-i2] single-end reads (reads 1) for focal species X duplicate 2
#- [-i3] paired-end reads (reads 2) for focal species X duplicate 1
#- [-i4] paired-end reads (reads 2) for focal species X duplicate 2
#- BWA and SAMtools available
#- Connection to the Stacks MySQL database available: databases of Stacks 'denovo_map' output for each species.
###############################################################################
import argparse
import os
import sys
import MySQLdb
###############################################################################
parser = argparse.ArgumentParser()
parser.add_argument('-i', action='store', dest='InputDir', help='Working Directory')
parser.add_argument('-d', action='store', dest='database', help='Stacks database')
parser.add_argument('-c', action='store', dest='CodeSp', help='ID of the species')
parser.add_argument('-i1', action='store', dest='R11', help='First R1 file')
parser.add_argument('-i2', action='store', dest='R12', help='Second R1 file')
parser.add_argument('-i3', action='store', dest='R21', help='First R2 file')
parser.add_argument('-i4', action='store', dest='R22', help='Second R2 file')
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
results = parser.parse_args()
print 'input directory =', results.InputDir
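# Example invocation (illustrative only; every path, database and file name below is an assumption):
#   ./RAD4SNPs_Main.py -i /path/to/workdir -d stacks_speciesX -c SpX \
#       -i1 rep1_R1.fq.gz -i2 rep2_R1.fq.gz -i3 rep1_R2.fq.gz -i4 rep2_R2.fq.gz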
##############################################################################
# Arguments testing
##############################################################################
if results.InputDir:
if os.path.isdir(results.InputDir):
print "Working directory is valid."
else :
print "Caution: working directory is invalid, please ckeck [-i]."
sys.exit()
else :
print "Please insert path for working directory [-i]. End of program."
sys.exit()
##############################################################################
if results.database:
db = MySQLdb.connect(host="", # your host, usually localhost
user="", # your username
passwd="", # your password
db=results.database) # name of the database
cur1= db.cursor() # connexion
print "Currently working on MySQL database: "+str(results.database)
else:
print "Incorrect ID for database: database not found, please check [-d]"
sys.exit()
###############################################################################
#
if results.R11:
if os.path.isfile(results.R11):
print "First file of single-end reads: found."
else :
print "Path to single-end reads data is not a file: please check out [-i1]."
sys.exit()
else :
print "Please insert path to single-end read files [-i1]. End of program."
sys.exit()
#
if results.R12:
if os.path.isfile(results.R12):
print "Second file of single-end reads: found."
else :
print "Path to single-end reads data is not a file: please check out [-i2]."
sys.exit()
else :
print "Please insert path to single-end read files [-2]. End of program."
sys.exit()
#
if results.R21:
if os.path.isfile(results.R21):
print "First file of paired-end reads: found."
else :
print "Path to paired-end reads data is not a file: please check out [-i3]."
sys.exit()
else :
print "Please insert path to paired-end read files [-i3]. End of program."
sys.exit()
#
if results.R22:
if os.path.isfile(results.R22):
print "Second file of paired-end reads: found."
else :
print "Path to paired-end reads data is not a file: please check out [-i4]."
sys.exit()
else :
print "Please insert path to paired-end read files [-i4]. End of program."
sys.exit()
###############################################################################
if results.CodeSp:
    CodeEspece=str(results.CodeSp)
    # Make sure the species code ends with an underscore, since it is used as a prefix separator.
    if CodeEspece[-1:]!="_":
        CodeEspece=str(results.CodeSp)+str("_")
else:
    CodeEspece="std_"
###############################################################################
WorkDir=os.path.abspath(results.InputDir) # Current working directory
FastaCatalog=str(WorkDir)+"/"+str(results.CodeSp)+"Catalog.fasta" # Formatting name of candidates fasta file -output of MySQL filtering
###############################################################################
# Main program
###############################################################################
if os.path.isfile("/usr/bin/bwa"):
print "BWA program is found."
else :
print "Cannot find BWA: please check out pipeline requirements."
sys.exit()
###samtools
if os.path.isfile("/usr/bin/samtools"):
print "SAMtools program is found."
else :
print "Cannot find SAMtools: please check out pipeline requirements."
sys.exit()
#####################################################
# Working directory writable
filepath = results.InputDir+'/file.txt'
try:
    filehandle = open( filepath, 'w' )
    filehandle.close()
except IOError:
    sys.exit( 'Working directory is not accessible: ' + filepath )
###############################################################################
# Pipeline commands:
###############################################################################
#################################### FIRST FILTERING ##########################
print os.getcwd()
commandeExtractFasta="./RAD4SNPs_SQL2Fasta.py -o "+str(FastaCatalog)+" -d "+str(results.database)+" -c "+str(CodeEspece)
print "Extraction du fichier fasta"
print commandeExtractFasta
os.system(commandeExtractFasta)
############################## Fusion of single-end reads #####################
if results.R11:
if results.R12:
commandFusionR1="cat "+str(results.R11)+" "+str(results.R12)+" > "+str(WorkDir)+"/allR1.fq.gz"
else :
commandFusionR1="cp "+str(results.R11)+" "+str(WorkDir)+"/allR1.fq.gz"
############################# end of read-file merging
############################## Fusion of paired-end reads #####################
if results.R21:
if results.R22:
commandFusionR2="cat "+str(results.R21)+" "+str(results.R22)+" > "+str(WorkDir)+"/allR2.fq.gz"
else :
commandFusionR2="cp "+str(results.R21)+" "+str(WorkDir)+"/allR2.fq.gz"
#################################### SECOND FILTERING (1) #####################
command1="bwa index "+str(FastaCatalog) # Indexing
command2="bwa mem -a -M "+str(FastaCatalog)+" "+str(WorkDir)+"/allR1.fq.gz > "+str(WorkDir)+"/PremierAlign.sam" # SE reads alignment
command3="samtools view -Sb "+str(WorkDir)+"/PremierAlign.sam | samtools sort - "+str(WorkDir)+"/PremierAlign1Sorted" # Conversion to bam file
command4="samtools view -F4 "+str(WorkDir)+"/PremierAlign1Sorted.bam > "+str(WorkDir)+"/PremierAlign1Sorted-F4.sam" # Elimination of unmapped SE reads
print "SE reads merging: "+str(commandFusionR1)
os.system(commandFusionR1)
print "PE reads merging: "+str(commandFusionR2)
os.system(commandFusionR2)
print "BWA indexing: "+str(command1)
os.system(command1)
print "Alignment: "+str(command2)
os.system(command2)
print "Conversion to bam file: "+str(command3)
os.system(command3)
print "Elimination of unmapped SE reads: "+str(command4)
os.system(command4)
print " ************************************************************************"
print " Second filtering (1) with default parameters "
print " ************************************************************************"
print os.getcwd()
commande5="./RAD4SNPs_SamFilter.py -i "+str(WorkDir)+"/PremierAlign1Sorted-F4.sam"
os.system(commande5)
Candidatfasta1=str(WorkDir)+"/PremierAlign1Sorted-F4R1Filtered.fa" # Obtention of incomplete SE-validated fasta file
if os.path.isfile(Candidatfasta1):
print "SE-validated fasta file about to be completed. Re-aligning to complete second filtering."
else :
sys.exit( '****ERROR**** A problem occurred. Please check out alignment outputs.')
#################################### SECOND FILTERING (2) #####################
command21="bwa index "+str(Candidatfasta1)
command22="bwa mem -a -M "+str(Candidatfasta1)+" "+str(WorkDir)+"/allR1.fq.gz > "+str(WorkDir)+"/SecondAlign.sam"
command23="samtools view -Sb "+str(WorkDir)+"/SecondAlign.sam | samtools sort - "+str(WorkDir)+"/SecondAlign1Sorted"
command25="samtools index "+str(WorkDir)+"/SecondAlign1Sorted.bam"
command25bis="samtools faidx "+str(Candidatfasta1)
command26="samtools mpileup -d 1000 -O --ff 4 -f "+str(Candidatfasta1) +" "+ str(WorkDir)+"/SecondAlign1Sorted.bam"+" > "+str(WorkDir)+"/CandidatsR1.pileup"
print "BWA indexing: "+str(command21)
os.system(command21)
print "Alignment: "+str(command22)
os.system(command22)
print "Conversion to bam file: "+str(command23)
os.system(command23)
print "Indexing of bam file: "+str(command25)
os.system(command25)
print "Indexing for pileup file: "+str(command25bis)
os.system(command25bis)
print "Construction of SE pileup file: "+str(command26)
os.system(command26)
print " ************************************************************************"
print " Second filtering (2) with default parameters "
print " ************************************************************************"
print os.getcwd()
command27="./RAD4SNPs_PileupFilter.py -i "+str(WorkDir)+"/CandidatsR1.pileup"
print "End of second filtering: elimination of flanking variants: "+str(command27)
os.system(command27)
command28="./RAD4SNPs_FinalSQLExtract.py -i"+str(WorkDir)+"/CandidatsR1NoMulti.txt -d "+str(results.database)+" -c "+str(CodeEspece)+" > "+str(WorkDir)+"/CandidatFin.fasta"
print "Complete SE-validated fasta file: "+str(command28)
os.system(command28)
command28bis="sed -i '1d' "+str(WorkDir)+"/CandidatFin.fasta"
os.system(command28bis)
#################################### THIRD FILTERING ##########################
CandidatFin=str(WorkDir)+"/CandidatFin.fasta"
if os.path.isfile(CandidatFin):
print "SE-validated fasta file is completed. Re-aligning to perform third filtering."
else :
sys.exit( '****ERROR**** A problem occurred. Please check out alignment and/or pileup outputs.')
command29="bwa index "+str(CandidatFin)
command30="bwa mem -a -M "+str(CandidatFin)+" "+str(WorkDir)+"/allR2.fq.gz > "+str(WorkDir)+"/ThirdAlign.sam"
command31="samtools view -Sb "+str(WorkDir)+"/ThirdAlign.sam | samtools sort - "+str(WorkDir)+"/ThirdAlign2Sorted"
command32="samtools index "+str(WorkDir)+"/ThirdAlign2Sorted.bam"
command32bis="samtools faidx "+str(CandidatFin)
command33="samtools mpileup -d 1000 -O --ff 4 -f "+str(CandidatFin)+" "+str(WorkDir)+"/ThirdAlign2Sorted.bam"+" > "+str(WorkDir)+"/Candidats3.pileup"
print "BWA indexing: "+str(command29)
os.system(command29)
print "Alignment: "+str(command30)
os.system(command30)
print "Conversion to bam file: "+str(command31)
os.system(command31)
print "Indexing of bam file: "+str(command32)
os.system(command32)
print "Indexing for pileup file: "+str(command32bis)
os.system(command32bis)
print "Construction of PE pileup file: "+str(command33)
os.system(command33)
print " ************************************************************************"
print " Third filtering with default parameters "
print " ************************************************************************"
print os.getcwd()
command34="./RAD4SNPs_PileupFilter.py -i "+str(WorkDir)+"/Candidats3.pileup"
print "End of third filtering: elimination of flanking variants: "+str(command34)
os.system(command34)
command35="./RAD4SNPs_FinalSQLExtract.py -i"+str(WorkDir)+"/CandidatsR2NoMulti.txt -d "+str(results.database)+" -c "+str(CodeEspece)+" > "+str(WorkDir)+"/SNPs_out.fasta"
print "Complete PE-validated fasta file: "+str(command35)
os.system(command35)
# End.
| glassalle/Rad4Snps | RAD4SNPs_Main.py | Python | gpl-3.0 | 12,997 | ["BWA"] | 0fd724303503e8f90d650d5a2f4ecc3b37ddba1d6cbfd52562b19041b1c5a5a4 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test single point logfiles in cclib."""
import datetime
import os
import unittest
import numpy
import packaging.version
from common import get_minimum_carbon_separation
from skip import skipForParser
from skip import skipForLogfile
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericSPTest(unittest.TestCase):
"""Generic restricted single point unittest"""
# Molecular mass of DVB in mD, and expected precision.
molecularmass = 130078.25
mass_precision = 0.10
# In STO-3G, H has 1, C has 5 (1 S and 4 SP).
nbasisdict = {1:1, 6:5}
# Approximate B3LYP energy of dvb after SCF in STO-3G.
b3lyp_energy = -10365
# Overlap first two atomic orbitals.
overlap01 = 0.24
    # Generally, one criterion for SCF energy convergence.
num_scf_criteria = 1
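    # Illustrative sketch (not part of the suite): the `self.data` object checked by these
    # tests is a ccData instance produced by cclib's parsers, e.g. (the file name is an
    # assumption and a reasonably recent cclib is assumed):
    #   import cclib
    #   data = cclib.io.ccread("dvb_sp.out")
    #   print(data.natom, data.nbasis, data.scfenergies[-1])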
def testnatom(self):
"""Is the number of atoms equal to 20?"""
self.assertEqual(self.data.natom, 20)
def testatomnos(self):
"""Are the atomnos correct?"""
# The nuclear charges should be integer values in a NumPy array.
self.assertTrue(numpy.alltrue([numpy.issubdtype(atomno, numpy.signedinteger)
for atomno in self.data.atomnos]))
self.assertEqual(self.data.atomnos.dtype.char, 'i')
self.assertEqual(self.data.atomnos.shape, (20,) )
self.assertEqual(sum(self.data.atomnos == 6) + sum(self.data.atomnos == 1), 20)
@skipForParser('DALTON', 'DALTON has a very low accuracy for the printed values of all populations (2 decimals rounded in a weird way), so let it slide for now')
@skipForParser('FChk', 'The parser is still being developed so we skip this test')
@skipForLogfile('Jaguar/basicJaguar7', 'We did not print the atomic partial charges in the unit tests for this version')
@skipForLogfile('Molpro/basicMolpro2006', "These tests were run a long time ago and since we don't have access to Molpro 2006 anymore, we can skip this test (it is tested in 2012)")
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testatomcharges(self):
"""Are atomcharges (at least Mulliken) consistent with natom and sum to zero?"""
for type in set(['mulliken'] + list(self.data.atomcharges.keys())):
charges = self.data.atomcharges[type]
self.assertEqual(len(charges), self.data.natom)
self.assertAlmostEqual(sum(charges), 0.0, delta=0.001)
def testatomcoords(self):
"""Are the dimensions of atomcoords 1 x natom x 3?"""
expected_shape = (1, self.data.natom, 3)
self.assertEqual(self.data.atomcoords.shape, expected_shape)
def testatomcoords_units(self):
"""Are atomcoords consistent with Angstroms?"""
min_carbon_dist = get_minimum_carbon_separation(self.data)
dev = abs(min_carbon_dist - 1.34)
self.assertTrue(dev < 0.03, "Minimum carbon dist is %.2f (not 1.34)" % min_carbon_dist)
@skipForParser('Molcas', 'missing mult')
def testcharge_and_mult(self):
"""Are the charge and multiplicity correct?"""
self.assertEqual(self.data.charge, 0)
self.assertEqual(self.data.mult, 1)
def testnbasis(self):
"""Is the number of basis set functions correct?"""
count = sum([self.nbasisdict[n] for n in self.data.atomnos])
self.assertEqual(self.data.nbasis, count)
@skipForParser('ADF', 'ADF parser does not extract atombasis')
@skipForLogfile('Jaguar/basicJaguar7', 'Data file does not contain enough information. Can we make a new one?')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testatombasis(self):
"""Are the indices in atombasis the right amount and unique?"""
all = []
for i, atom in enumerate(self.data.atombasis):
self.assertEqual(len(atom), self.nbasisdict[self.data.atomnos[i]])
all += atom
# Test if there are as many indices as atomic orbitals.
self.assertEqual(len(all), self.data.nbasis)
# Check if all are different (every orbital indexed once).
self.assertEqual(len(set(all)), len(all))
@skipForParser('FChk', 'Formatted checkpoint files do not have a section for atommasses')
@skipForParser('GAMESS', 'atommasses not implemented yet')
@skipForParser('GAMESSUK', 'atommasses not implemented yet')
@skipForParser('Jaguar', 'atommasses not implemented yet')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Molpro', 'atommasses not implemented yet')
@skipForParser('NWChem', 'atommasses not implemented yet')
@skipForLogfile('Psi4/basicPsi4.0b5', 'atommasses not implemented yet')
@skipForParser('QChem', 'atommasses not implemented yet')
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testatommasses(self):
"""Do the atom masses sum up to the molecular mass?"""
mm = 1000*sum(self.data.atommasses)
msg = "Molecule mass: %f not %f +- %fmD" % (mm, self.molecularmass, self.mass_precision)
self.assertAlmostEqual(mm, self.molecularmass, delta=self.mass_precision, msg=msg)
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testcoreelectrons(self):
"""Are the coreelectrons all 0?"""
ans = numpy.zeros(self.data.natom, 'i')
numpy.testing.assert_array_equal(self.data.coreelectrons, ans)
@skipForParser('FChk', 'Formatted checkpoint files do not have a section for symmetry')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Molpro', '?')
@skipForParser('ORCA', 'ORCA has no support for symmetry yet')
def testsymlabels(self):
"""Are all the symmetry labels either Ag/u or Bg/u?"""
sumwronglabels = sum([x not in ['Ag', 'Bu', 'Au', 'Bg'] for x in self.data.mosyms[0]])
self.assertEqual(sumwronglabels, 0)
def testhomos(self):
"""Is the index of the HOMO equal to 34?"""
numpy.testing.assert_array_equal(self.data.homos, numpy.array([34],"i"), "%s != array([34],'i')" % numpy.array_repr(self.data.homos))
@skipForParser('FChk', 'Formatted Checkpoint files do not have a section for SCF energy')
def testscfvaluetype(self):
"""Are scfvalues and its elements the right type??"""
self.assertEqual(type(self.data.scfvalues),type([]))
self.assertEqual(type(self.data.scfvalues[0]),type(numpy.array([])))
@skipForParser('FChk', 'Formatted Checkpoint files do not have a section for SCF energy')
def testscfenergy(self):
"""Is the SCF energy within the target?"""
self.assertAlmostEqual(self.data.scfenergies[-1], self.b3lyp_energy, delta=40, msg="Final scf energy: %f not %i +- 40eV" %(self.data.scfenergies[-1], self.b3lyp_energy))
@skipForParser('FChk', 'Formatted Checkpoint files do not have a section for SCF convergence')
def testscftargetdim(self):
"""Do the scf targets have the right dimensions?"""
self.assertEqual(self.data.scftargets.shape, (len(self.data.scfvalues), len(self.data.scfvalues[0][0])))
@skipForParser('FChk', 'Formatted Checkpoint files do not have a section for SCF convergence')
def testscftargets(self):
"""Are correct number of SCF convergence criteria being parsed?"""
self.assertEqual(len(self.data.scftargets[0]), self.num_scf_criteria)
def testlengthmoenergies(self):
"""Is the number of evalues equal to nmo?"""
if hasattr(self.data, "moenergies"):
self.assertEqual(len(self.data.moenergies[0]), self.data.nmo)
def testtypemoenergies(self):
"""Is moenergies a list containing one numpy array?"""
if hasattr(self.data, "moenergies"):
self.assertIsInstance(self.data.moenergies, list)
self.assertIsInstance(self.data.moenergies[0], numpy.ndarray)
@skipForParser('DALTON', 'mocoeffs not implemented yet')
@skipForLogfile('Jaguar/basicJaguar7', 'Data file does not contain enough information. Can we make a new one?')
@skipForParser('Turbomole', 'Use of symmetry has reduced the number of mo coeffs')
def testdimmocoeffs(self):
"""Are the dimensions of mocoeffs equal to 1 x nmo x nbasis?"""
if hasattr(self.data, "mocoeffs"):
self.assertIsInstance(self.data.mocoeffs, list)
self.assertEqual(len(self.data.mocoeffs), 1)
self.assertEqual(self.data.mocoeffs[0].shape,
(self.data.nmo, self.data.nbasis))
@skipForParser('DALTON', 'mocoeffs not implemented yet')
@skipForLogfile('Jaguar/basicJaguar7', 'Data file does not contain enough information. Can we make a new one?')
def testfornoormo(self):
"""Do we have NOs or MOs?"""
self.assertTrue(
hasattr(self.data, "nocoeffs") or hasattr(self.data, "mocoeffs")
)
def testdimnoccnos(self):
"""Is the length of nooccnos equal to nmo?"""
if hasattr(self.data, "nooccnos"):
self.assertIsInstance(self.data.nooccnos, numpy.ndarray)
self.assertEqual(len(self.data.nooccnos), self.data.nmo)
def testdimnocoeffs(self):
"""Are the dimensions of nocoeffs equal to nmo x nmo?"""
if hasattr(self.data, "nocoeffs"):
self.assertIsInstance(self.data.nocoeffs, numpy.ndarray)
self.assertEqual(
self.data.nocoeffs.shape, (self.data.nmo, self.data.nmo)
)
@skipForParser('DALTON', 'To print: **INTEGRALS\n.PROPRI')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Psi4', 'Psi4 does not currently have the option to print the overlap matrix')
@skipForParser('QChem', 'QChem cannot print the overlap matrix')
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testaooverlaps(self):
"""Are the dims and values of the overlap matrix correct?"""
self.assertEqual(self.data.aooverlaps.shape, (self.data.nbasis, self.data.nbasis))
# The matrix is symmetric.
row = self.data.aooverlaps[0,:]
col = self.data.aooverlaps[:,0]
self.assertEqual(sum(col - row), 0.0)
# All values on diagonal should be exactly one.
for i in range(self.data.nbasis):
self.assertEqual(self.data.aooverlaps[i,i], 1.0)
# Check some additional values that don't seem to move around between programs.
self.assertAlmostEqual(self.data.aooverlaps[0, 1], self.overlap01, delta=0.01)
self.assertAlmostEqual(self.data.aooverlaps[1, 0], self.overlap01, delta=0.01)
self.assertAlmostEqual(self.data.aooverlaps[3,0], 0.0)
self.assertAlmostEqual(self.data.aooverlaps[0,3], 0.0)
def testoptdone(self):
"""There should be no optdone attribute set."""
self.assertFalse(hasattr(self.data, 'optdone'))
@skipForParser('FChk', 'The parser is still being developed so we skip this test')
@skipForParser('Gaussian', 'Logfile needs to be updated')
@skipForParser('Jaguar', 'No dipole moments in the logfile')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
def testmoments(self):
"""Does the dipole and possible higher molecular moments look reasonable?"""
# The reference point is always a vector, but not necessarily the
# origin or center of mass. In this case, however, the center of mass
        # is at the origin, so we know what to expect.
reference = self.data.moments[0]
self.assertEqual(len(reference), 3)
for x in reference:
self.assertEqual(x, 0.0)
# Length and value of dipole moment should always be correct (zero for this test).
dipole = self.data.moments[1]
self.assertEqual(len(dipole), 3)
for d in dipole:
self.assertAlmostEqual(d, 0.0, places=7)
# If the quadrupole is there, we can expect roughly -50B for the XX moment,
        # -50B for the YY moment and -60B for the ZZ moment.
if len(self.data.moments) > 2:
quadrupole = self.data.moments[2]
self.assertEqual(len(quadrupole), 6)
self.assertAlmostEqual(quadrupole[0], -50, delta=2.5)
self.assertAlmostEqual(quadrupole[3], -50, delta=2.5)
self.assertAlmostEqual(quadrupole[5], -60, delta=3)
# If the octupole is there, it should have 10 components and be zero.
if len(self.data.moments) > 3:
octupole = self.data.moments[3]
self.assertEqual(len(octupole), 10)
for m in octupole:
self.assertAlmostEqual(m, 0.0, delta=0.001)
# The hexadecapole should have 15 elements, an XXXX component of around -1900 Debye*ang^2,
# a YYYY component of -330B and a ZZZZ component of -50B.
if len(self.data.moments) > 4:
hexadecapole = self.data.moments[4]
self.assertEqual(len(hexadecapole), 15)
self.assertAlmostEqual(hexadecapole[0], -1900, delta=90)
self.assertAlmostEqual(hexadecapole[10], -330, delta=11)
self.assertAlmostEqual(hexadecapole[14], -50, delta=2.5)
        # There are 21 unique 32-pole moments, and all are zero in this test case.
if len(self.data.moments) > 5:
moment32 = self.data.moments[5]
self.assertEqual(len(moment32), 21)
for m in moment32:
self.assertEqual(m, 0.0)
@skipForParser('ADF', 'reading basis set names is not implemented')
@skipForParser('GAMESSUK', 'reading basis set names is not implemented')
@skipForParser('Molcas', 'reading basis set names is not implemented')
@skipForParser('ORCA', 'reading basis set names is not implemented')
@skipForParser('Psi4', 'reading basis set names is not implemented')
def testmetadata_basis_set(self):
"""Does metadata have expected keys and values?"""
self.assertEqual(self.data.metadata["basis_set"].lower(), "sto-3g")
@skipForParser('ADF', 'reading input file contents and name is not implemented')
@skipForParser('DALTON', 'reading input file contents and name is not implemented')
@skipForParser('FChk', 'Formatted checkpoint files do not have an input file section')
@skipForParser('GAMESS', 'reading input file contents and name is not implemented')
@skipForParser('GAMESSUK', 'reading input file contents and name is not implemented')
@skipForParser('Gaussian', 'reading input file contents and name is not implemented')
@skipForParser('Jaguar', 'reading input file contents and name is not implemented')
@skipForParser('Molcas', 'reading input file contents and name is not implemented')
@skipForParser('Molpro', 'reading input file contents and name is not implemented')
@skipForParser('NWChem', 'reading input file contents and name is not implemented')
@skipForParser('Psi4', 'reading input file contents and name is not implemented')
@skipForParser('QChem', 'reading input file contents and name is not implemented')
@skipForParser('Turbomole', 'reading input file contents and name is not implemented')
def testmetadata_input_file(self):
"""Does metadata have expected keys and values?"""
self.assertIn("input_file_contents", self.data.metadata)
# TODO make input file names consistent where possible, though some
# programs do not allow arbitrary file extensions; for example, DALTON
# must end in `dal`.
self.assertIn("dvb_sp.in", self.data.metadata["input_file_name"])
def testmetadata_methods(self):
"""Does metadata have expected keys and values?"""
# TODO implement and unify across parsers; current values are [],
# ["HF"], ["RHF"], and ["DFT"]
self.assertIn("methods", self.data.metadata)
def testmetadata_package(self):
"""Does metadata have expected keys and values?"""
# TODO How can the value be tested when the package name comes from
# the parser and isn't stored on ccData?
self.assertIn("package", self.data.metadata)
@skipForParser('FChk', 'Formatted Checkpoint files do not have section for legacy package version')
def testmetadata_legacy_package_version(self):
"""Does metadata have expected keys and values?"""
# TODO Test specific values for each unit test.
self.assertIn("legacy_package_version", self.data.metadata)
@skipForParser('FChk', 'Formatted Checkpoint files do not have section for package version')
def testmetadata_package_version(self):
"""Does metadata have expected keys and values?"""
# TODO Test specific values for each unit test.
self.assertIsInstance(
packaging.version.parse(self.data.metadata["package_version"]),
packaging.version.Version
)
@skipForParser('ADF', 'reading point group symmetry and name is not implemented')
@skipForParser('FChk', 'point group symmetry cannot be printed')
@skipForParser('GAMESS', 'reading point group symmetry and name is not implemented')
@skipForParser('GAMESSUK', 'reading point group symmetry and name is not implemented')
@skipForParser('Gaussian', 'reading point group symmetry and name is not implemented')
@skipForParser('Jaguar', 'reading point group symmetry and name is not implemented')
@skipForParser('Molcas', 'reading point group symmetry and name is not implemented')
@skipForParser('Molpro', 'reading point group symmetry and name is not implemented')
@skipForParser('MOPAC', 'reading point group symmetry and name is not implemented')
@skipForParser('NWChem', 'reading point group symmetry and name is not implemented')
@skipForParser('ORCA', 'reading point group symmetry and name is not implemented')
@skipForParser('Psi3', 'reading point group symmetry and name is not implemented')
@skipForParser('Psi4', 'reading point group symmetry and name is not implemented')
@skipForParser('QChem', 'reading point group symmetry and name is not implemented')
@skipForParser('Turbomole', 'reading point group symmetry and name is not implemented')
def testmetadata_symmetry_detected(self):
"""Does metadata have expected keys and values?"""
self.assertEqual(self.data.metadata["symmetry_detected"], "c2h")
@skipForParser('ADF', 'reading point group symmetry and name is not implemented')
@skipForParser('FChk', 'point group symmetry cannot be printed')
@skipForParser('GAMESS', 'reading point group symmetry and name is not implemented')
@skipForParser('GAMESSUK', 'reading point group symmetry and name is not implemented')
@skipForParser('Gaussian', 'reading point group symmetry and name is not implemented')
@skipForParser('Jaguar', 'reading point group symmetry and name is not implemented')
@skipForParser('Molcas', 'reading point group symmetry and name is not implemented')
@skipForParser('Molpro', 'reading point group symmetry and name is not implemented')
@skipForParser('MOPAC', 'reading point group symmetry and name is not implemented')
@skipForParser('NWChem', 'reading point group symmetry and name is not implemented')
@skipForParser('ORCA', 'reading point group symmetry and name is not implemented')
@skipForParser('Psi3', 'reading point group symmetry and name is not implemented')
@skipForParser('Psi4', 'reading point group symmetry and name is not implemented')
@skipForParser('QChem', 'reading point group symmetry and name is not implemented')
@skipForParser('Turbomole', 'reading point group symmetry and name is not implemented')
def testmetadata_symmetry_used(self):
"""Does metadata have expected keys and values?"""
self.assertEqual(self.data.metadata["symmetry_used"], "c2h")
@skipForParser('ADF', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('DALTON', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('FChk', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('GAMESS', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('GAMESSUK', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('GAMESSUS', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('Jaguar', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('Molcas', ' reading cpu/wall time is not implemented for this parser')
@skipForParser('Molpro', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('NWChem', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('ORCA', 'reading cpu not implemented for this parser, wall time not available')
@skipForParser('Psi3', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('Psi4', 'reading cpu/wall time is not implemented for this parser')
@skipForParser('Turbomole', 'reading cpu/wall time is not implemented for this parser')
def testmetadata_times(self):
"""Does metadata have expected keys and values of correct types?"""
if "wall_time" in self.data.metadata:
assert self.data.metadata["wall_time"]
assert all(isinstance(wall_time, datetime.timedelta)
for wall_time in self.data.metadata["wall_time"])
if "cpu_time" in self.data.metadata:
assert self.data.metadata["cpu_time"]
assert all(isinstance(cpu_time, datetime.timedelta)
for cpu_time in self.data.metadata["cpu_time"])
class ADFSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
# ADF only prints up to 0.1mD per atom, so the precision here is worse than 0.1mD.
mass_precision = 0.3
foverlap00 = 1.00003
foverlap11 = 1.02672
foverlap22 = 1.03585
num_scf_criteria = 2
b3lyp_energy = -140
def testfoverlaps(self):
"""Are the dims and values of the fragment orbital overlap matrix correct?"""
self.assertEqual(self.data.fooverlaps.shape, (self.data.nbasis, self.data.nbasis))
# The matrix is symmetric.
row = self.data.fooverlaps[0,:]
col = self.data.fooverlaps[:,0]
self.assertEqual(sum(col - row), 0.0)
        # Although the diagonal elements are close to unity, the SFOs
# are generally not normalized, so test for a few specific values.
self.assertAlmostEqual(self.data.fooverlaps[0, 0], self.foverlap00, delta=0.0001)
self.assertAlmostEqual(self.data.fooverlaps[1, 1], self.foverlap11, delta=0.0001)
self.assertAlmostEqual(self.data.fooverlaps[2, 2], self.foverlap22, delta=0.0001)
class GaussianSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
num_scf_criteria = 3
class JaguarSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
num_scf_criteria = 2
class Jaguar7SPTest(JaguarSPTest):
"""Customized restricted single point unittest"""
# Jaguar prints only 10 virtual MOs by default. Can we re-run with full output?
def testlengthmoenergies(self):
"""Is the number of evalues equal to the number of occ. MOs + 10?"""
self.assertEqual(len(self.data.moenergies[0]), self.data.homos[0]+11)
class MolcasSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
num_scf_criteria = 4
class MolproSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
num_scf_criteria = 2
class NWChemKSSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
num_scf_criteria = 3
class PsiSPTest(GenericSPTest):
"""Customized restricted single point HF/KS unittest"""
num_scf_criteria = 2
class OrcaSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
# Orca has different weights for the masses
molecularmass = 130190
num_scf_criteria = 3
class TurbomoleSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
num_scf_criteria = 2
def testmetadata_basis_set(self):
"""Does metadata have expected keys and values?"""
# One of our test cases used sto-3g hondo
valid_basis = self.data.metadata["basis_set"].lower() in ("sto-3g", "sto-3g hondo")
self.assertTrue(valid_basis)
class GenericDispersionTest(unittest.TestCase):
"""Generic single-geometry dispersion correction unittest"""
dispersionenergy = -0.4005496
def testdispersionenergies(self):
"""Is the dispersion energy parsed correctly?"""
        self.assertEqual(len(self.data.dispersionenergies), 1)
self.assertAlmostEqual(
self.data.dispersionenergies[0],
self.dispersionenergy,
delta=2.0e-7
)
class FireflyDispersionTest(GenericDispersionTest):
"""Customized single-geometry dispersion correction unittest"""
dispersionenergy = -0.4299821
if __name__ == "__main__":
import sys
sys.path.insert(1, os.path.join(__filedir__, ".."))
from test_data import DataSuite
suite = DataSuite(['SP'])
suite.testall()
| langner/cclib | test/data/testSP.py | Python | bsd-3-clause | 26,270 | ["ADF", "Dalton", "GAMESS", "Gaussian", "Jaguar", "MOLCAS", "MOPAC", "Molpro", "NWChem", "ORCA", "Psi4", "TURBOMOLE", "cclib"] | cf9d3b49a84091552e51784824bb312ad756e16cd636f6201d97e597aab2c3b4 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: ast.py
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import __version__
def parse(source, filename='<unknown>', mode='exec'):
"""
Parse the source into an AST node.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
"""
return compile(source, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None,'True': True,'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, Str):
return node.s
if isinstance(node, Num):
return node.n
if isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
if isinstance(node, List):
return list(map(_convert, node.elts))
if isinstance(node, Dict):
return dict(((_convert(k), _convert(v)) for k, v in zip(node.keys, node.values)))
if isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)) and isinstance(node.right, Num) and isinstance(node.right.n, complex) and isinstance(node.left, Num):
if isinstance(node.left.n, (int, long, float)):
left = node.left.n
right = node.right.n
if isinstance(node.op, Add):
return left + right
return left - right
raise ValueError('malformed string')
return _convert(node_or_string)
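# Example (illustrative): only literal structures are accepted.
#   literal_eval("{'a': (1, 2.5), 'b': [True, None]}")   # -> {'a': (1, 2.5), 'b': [True, None]}
#   literal_eval("__import__('os')")                     # raises ValueError('malformed string')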
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [ (a, _format(b)) for a, b in iter_fields(node) ]
rv = '%s(%s' % (node.__class__.__name__,
', '.join(('%s=%s' % field for field in fields) if annotate_fields else (b for a, b in fields)))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join(('%s=%s' % (a, _format(getattr(node, a))) for a in node._attributes))
return rv + ')'
if isinstance(node, list):
return '[%s]' % ', '.join((_format(x) for x in node))
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
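# Example (illustrative; the exact field layout depends on the Python 2.7 grammar):
#   dump(parse("x = 1"))
#   -> "Module(body=[Assign(targets=[Name(id='x', ctx=Store())], value=Num(n=1))])"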
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in ('lineno', 'col_offset'):
if attr in old_node._attributes and attr in new_node._attributes and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield (
field, getattr(node, field))
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at *node*
(including *node* itself), in no specified order. This is useful if you
only want to modify nodes in place and don't care about the context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
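# Illustrative usage sketch (not part of the original module): because walk()
# yields every descendant node, simple queries over a tree become one-liners,
# e.g.
#
#     >>> tree = parse("a = b + c")
#     >>> len([n for n in walk(tree) if isinstance(n, Name)])
#     3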
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/ast.py | Python | unlicense | 11,832 | [
"VisIt"
] | 3df8e80240260e71c78fe66876ac026f8704ff9b0319c99bf57b8e9c6fa76b6a |
# -*- coding: utf-8 -*-
"""Suite of methods common operation on res.partner."""
import base64
import logging
import pprint
import random
from openerp import api, models, fields
_logger = logging.getLogger(__name__)
class ResPartnerGetStudios(models.Model):
"""Overcharge the definition of a partner with methods to get studios
from the location of the partner.
"""
_inherit = 'res.partner'
visit_count = fields.Integer(
'Number of visit for this partner',
readonly=True,
help='Number increased each time the frontend page of the studio is opened.'
)
@api.model
def get_studios_from_same_location(self):
"""Return a list of partners that have a logo with the same locations
that the given partner.
:return: list of partner records.
"""
company_pool = self.env['res.company']
# #294
# Looking for cgstudiomap to avoid to have it displayed.
# cgstudiomap is actually the partner linked to the res.company
# of the instance.
# looking for the first (and only so far) res.company
company = company_pool.browse(1)
# https://github.com/cgstudiomap/cgstudiomap/issues/177
# search return a recordset and we cannot do len() on it.
partners = [
partner for partner in self.search(
self.open_companies_domain.search +
[
('id', '!=', company.partner_id.id),
('image', '!=', False),
('country_id', '=', self.country_id.id),
('id', '!=', self.id)
]
)
]
return partners
@api.model
def get_random_studios_from_same_location(self, sample_):
"""Return a random sample from the list of partners
are returned from `self.get_studios_from_same_location`
:param int sample_: size of the sample of partners.
:return: list of partner records.
"""
partners = self.get_studios_from_same_location()
return random.sample(partners, min(len(partners), sample_))
class ResPartnerSave(models.Model):
"""Overcharge the definition of a partner to add methods to update
from the save page.
"""
_inherit = 'res.partner'
@staticmethod
def get_base64_from_filestore(filestore):
"""Encode a werkzerg.Filestore to a string using base64.
:param werkzerg.Filestore filestore: instance that represents the
new image of the partner.
:return: image encoded using base64
:rtype: str
"""
return base64.b64encode(filestore.read())
@api.model
def clean_values_from_post_request(self,
country_id=None,
remove_image=False,
image_file=None,
**kwargs):
"""Clean the different values coming from the save post request.
Data might be converted to be ingested by odoo.
For example, list of industries has to be gathered from all the keys
starting by industry_ids and converted into an odoo leaf to be ingested
into the X2X industry_ids field.
For the image several options to the user:
- a bool (remove_image) that will just remove the current image.
- a browse (image_file) that will replace the current image by the
newly selected.
If the remove_image is True, the image_file is ignored.
:param int country_id: id of the country to set the partner to.
:param bool remove_image: if the current image of the partner should
be removed.
        :param werkzeug.FileStorage image_file: instance that represents the
new image of the partner.
:param dict kwargs: list of additional fields to update.
:return: request.render
"""
values = kwargs
if country_id:
values['country_id'] = int(country_id)
if remove_image:
values['image'] = None
_logger.debug('condition: %s', image_file and not remove_image)
if image_file and not remove_image:
values['image'] = self.get_base64_from_filestore(image_file)
industry_ids = []
for key, value in kwargs.items():
if 'industry_ids' in key:
industry_ids.append(int(value))
del kwargs[key]
if industry_ids:
values['industry_ids'] = [(6, 0, industry_ids)]
return values
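    # Illustrative sketch with hypothetical POST values (not part of the
    # original code): clean_values_from_post_request() turns raw form data
    # such as
    #     {'name': 'My Studio', 'industry_ids-0': '2', 'industry_ids-1': '5'}
    # with country_id='73' into ORM-ready values like
    #     {'name': 'My Studio', 'country_id': 73,
    #      'industry_ids': [(6, 0, [2, 5])]}
    # where (6, 0, ids) is the Odoo "replace all links" X2X command.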
@api.model
def create_from_post_request(self, kwargs):
"""Process a create from data coming from a post request."""
values = self.clean_values_from_post_request(**kwargs)
# Users are only allowed to create companies.
values['is_company'] = True
return self.create(values)
@api.model
def write_from_post_request(self, kwargs):
"""Process a write from data coming from a post request."""
return self.write(self.clean_values_from_post_request(**kwargs))
@api.one
def full_location_studio_page(self):
"""Return the address as oneliner."""
elements = []
if self.street: elements.append(self.street)
if self.street2: elements.append(self.street2)
if self.city: elements.append(self.city)
if self.zip: elements.append(self.zip)
if self.state_id:
elements.append(self.state_id.name)
self.full_location = ', '.join(elements)
full_location = fields.Char(
'Full Location', compute='full_location_studio_page'
)
class ResPartnerEdition(models.Model):
"""Overcharge the definition of a partner to add methods for edit and
create pages.
"""
_inherit = 'res.partner'
@staticmethod
def get_partner_values():
"""Return the set of data to build edit/create view.
:return: dict
"""
return {
'id': 0,
'write_date': '',
'image_url': '',
'name': '',
'website': '',
'email': '',
'state': '',
'street': '',
'street2': '',
'city': '',
'zip': '',
'industry_ids': [],
'country_id': 0,
# social network urls
'social_networks': {
'twitter': '',
'youtube': '',
'vimeo': '',
'linkedin': '',
'facebook': '',
},
# phone numbers
'calls': {
'phone': '',
'mobile': '',
'fax': '',
}
}
@api.model
def build_values_from_kwargs(self, raw_data):
"""Remap the data in kwargs to a mapping that can be processed by
edition pages.
For the context, the edition pages when they are process on the server
can still have error then a write or a create will raise a error. Then
the data that were supposed to be saved in the partner have to be
remapped to match what the edition page waits for.
:param dict raw_data: data from partner.
:return: remapped data as dict.
"""
_logger.debug('kwargs: %s', pprint.pformat(raw_data))
partner_values = self.get_partner_values()
_logger.debug('partner_values: %s', pprint.pformat(partner_values))
industry_ids = []
for key, value in raw_data.iteritems():
# industries are separated in the raw_data, not part of a single
# list as expected by the page.
if 'industry_ids' in key:
industry_ids.append(int(value))
elif key in partner_values:
                # country_id is a string but the edition page expects it to be an int
if key in ['country_id']:
value = int(value)
partner_values[key] = value
elif key in partner_values['calls']:
partner_values['calls'][key] = value
elif key in partner_values['social_networks']:
partner_values['social_networks'][key] = value
if industry_ids:
industries = self.env['res.industry']
partner_values['industry_ids'] = industries.browse(industry_ids)
_logger.debug('updated partner_values: %s',
pprint.pformat(partner_values))
return partner_values
@api.model
def build_values_from_partner(self):
"""Fill up the partner_value from a partner record.
:return: dict
"""
websites = self.env['website']
partner_values = self.get_partner_values()
partner_values['id'] = self.id
partner_values['write_date'] = self.write_date
partner_values['name'] = self.name
partner_values['image_url'] = websites.image_url(
self, 'image_medium', size='256x256'
)
partner_values['website'] = self.website
partner_values['email'] = self.email
partner_values['state'] = self.state
partner_values['street'] = self.street
partner_values['street2'] = self.street2
partner_values['city'] = self.city
partner_values['zip'] = self.zip
partner_values['industry_ids'] = self.industry_ids
partner_values['country_id'] = self.country_id.id
partner_values['calls']['phone'] = self.phone
partner_values['calls']['mobile'] = self.mobile
partner_values['calls']['fax'] = self.fax
partner_values['social_networks']['linkedin'] = self.linkedin
partner_values['social_networks']['vimeo'] = self.vimeo
partner_values['social_networks']['youtube'] = self.youtube
partner_values['social_networks']['twitter'] = self.twitter
partner_values['social_networks']['facebook'] = self.facebook
return partner_values
| cgstudiomap/cgstudiomap | main/local_modules/frontend_studio/models/res_partner.py | Python | agpl-3.0 | 10,035 | [
"VisIt"
] | 2aaacfeb6a8e54de183b88b3acfddd3700533deea6660e50dd433ee1784089ad |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Reports/Text Reports/Database Summary Report.
"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import posixpath
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.lib import Person
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC, PARA_ALIGN_CENTER)
from gramps.gen.utils.file import media_path_full
from gramps.gen.datehandler import get_date
#------------------------------------------------------------------------
#
# SummaryReport
#
#------------------------------------------------------------------------
class SummaryReport(Report):
"""
This report produces a summary of the objects in the database.
"""
def __init__(self, database, options, user):
"""
Create the SummaryReport object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
"""
Report.__init__(self, database, options, user)
self.__db = database
def write_report(self):
"""
Overridden function to generate the report.
"""
self.doc.start_paragraph("SR-Title")
title = _("Database Summary Report")
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
self.summarize_people()
self.summarize_families()
self.summarize_media()
def summarize_people(self):
"""
Write a summary of all the people in the database.
"""
with_media = 0
incomp_names = 0
disconnected = 0
missing_bday = 0
males = 0
females = 0
unknowns = 0
namelist = []
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Individuals"))
self.doc.end_paragraph()
num_people = 0
for person in self.__db.iter_people():
num_people += 1
# Count people with media.
length = len(person.get_media_list())
if length > 0:
with_media += 1
# Count people with incomplete names.
for name in [person.get_primary_name()] + person.get_alternate_names():
if name.get_first_name().strip() == "":
incomp_names += 1
else:
if name.get_surname_list():
for surname in name.get_surname_list():
if surname.get_surname().strip() == "":
incomp_names += 1
else:
incomp_names += 1
# Count people without families.
if (not person.get_main_parents_family_handle() and
not len(person.get_family_handle_list())):
disconnected += 1
# Count missing birthdays.
birth_ref = person.get_birth_ref()
if birth_ref:
birth = self.__db.get_event_from_handle(birth_ref.ref)
if not get_date(birth):
missing_bday += 1
else:
missing_bday += 1
# Count genders.
if person.get_gender() == Person.FEMALE:
females += 1
elif person.get_gender() == Person.MALE:
males += 1
else:
unknowns += 1
# Count unique surnames
for name in [person.get_primary_name()] + person.get_alternate_names():
if not name.get_surname().strip() in namelist \
and not name.get_surname().strip() == "":
namelist.append(name.get_surname().strip())
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Number of individuals: %d") % num_people)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Males: %d") % males)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Females: %d") % females)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Individuals with unknown gender: %d") % unknowns)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Incomplete names: %d") %
incomp_names)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Individuals missing birth dates: %d") %
missing_bday)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Disconnected individuals: %d") % disconnected)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Unique surnames: %d") % len(namelist))
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Individuals with media objects: %d") %
with_media)
self.doc.end_paragraph()
def summarize_families(self):
"""
Write a summary of all the families in the database.
"""
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Family Information"))
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Number of families: %d") % self.__db.get_number_of_families())
self.doc.end_paragraph()
def summarize_media(self):
"""
Write a summary of all the media in the database.
"""
total_media = 0
size_in_bytes = 0
notfound = []
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Media Objects"))
self.doc.end_paragraph()
total_media = len(self.__db.get_media_object_handles())
mbytes = "0"
for media_id in self.__db.get_media_object_handles():
media = self.__db.get_object_from_handle(media_id)
try:
size_in_bytes += posixpath.getsize(
media_path_full(self.__db, media.get_path()))
length = len(str(size_in_bytes))
if size_in_bytes <= 999999:
mbytes = _("less than 1")
else:
mbytes = str(size_in_bytes)[:(length-6)]
except:
notfound.append(media.get_path())
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Number of unique media objects: %d") %
total_media)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Total size of media objects: %s MB") % mbytes)
self.doc.end_paragraph()
if len(notfound) > 0:
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Missing Media Objects"))
self.doc.end_paragraph()
for media_path in notfound:
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(media_path)
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# SummaryOptions
#
#------------------------------------------------------------------------
class SummaryOptions(MenuReportOptions):
"""
SummaryOptions provides the options for the SummaryReport.
"""
def __init__(self, name, dbase):
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the marker report.
"""
pass
def make_default_style(self, default_style):
"""Make the default output style for the Summary Report."""
font = FontStyle()
font.set_size(16)
font.set_type_face(FONT_SANS_SERIF)
font.set_bold(1)
para = ParagraphStyle()
para.set_header_level(1)
para.set_bottom_border(1)
para.set_top_margin(ReportUtils.pt2cm(3))
para.set_bottom_margin(ReportUtils.pt2cm(3))
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_("The style used for the title of the page."))
default_style.add_paragraph_style("SR-Title", para)
font = FontStyle()
font.set_size(12)
font.set_bold(True)
para = ParagraphStyle()
para.set_font(font)
para.set_top_margin(0)
para.set_description(_('The basic style used for sub-headings.'))
default_style.add_paragraph_style("SR-Heading", para)
font = FontStyle()
font.set_size(12)
para = ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_font(font)
para.set_top_margin(ReportUtils.pt2cm(3))
para.set_bottom_margin(ReportUtils.pt2cm(3))
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style("SR-Normal", para)
| Forage/Gramps | gramps/plugins/textreport/summary.py | Python | gpl-2.0 | 11,013 | [
"Brian"
] | e154a9faed0b8e4fe59de95bcd463fe12f020330f4e1ca0309f6493f3f9cb58f |
"""
Utilities for loading static pipe_analysis PNGs into holoviews DynamicMaps
See 'qa-image-browser.ipynb' demo notebook for examples of usage.
"""
from __future__ import print_function, division
import re, os, glob
from itertools import product
from functools import partial
import numpy as np
import holoviews as hv
import logging
from matplotlib import pyplot as plt
try:
from lsst.pipe.analysis.utils import Filenamer
except ImportError:
logging.warning('Pipe analysis not available.')
from .rc import wide_filters, cosmos_filters
from .utils import get_tracts, get_visits
def get_color_plot(butler, tract=8766, description='color_wPerp', style='psfMagHist', scale=None):
dataId = {'tract':tract}
filenamer = Filenamer(butler, 'plotColor', dataId)
filename = filenamer(description=description, dataId=dataId, style=style)
try:
rgb = hv.RGB.load_image(filename, bare=True)
# back out the aspect ratio from bounds
l,b,r,t = rgb.bounds.lbrt()
aspect = (r-l)/(t-b)
h = 480
w = int(h * aspect)
rgb = rgb.opts(plot={'width':w, 'height':h})
if scale is not None:
rgb = rgb.opts(plot={'width':int(w*scale), 'height':int(h*scale)})
return rgb
except FileNotFoundError:
return hv.RGB(np.zeros((2,2))).opts(plot={'width':640, 'height':480})
def color_tract_layout(butler, description, style='psfMagHist', tracts=None, scale=1.0):
if tracts is None:
tracts = get_tracts(butler)
return hv.Layout([get_color_plot(butler, tract, description=description, style=style, scale=scale)
for tract in tracts])
def color_dmap(butler, tracts=None, descriptions=['color_wPerp', 'color_xPerp', 'color_yPerp'],
styles=['psfMagHist', 'sky-stars'], scale=1.0):
if tracts is None:
tracts = get_tracts(butler)
dmap = hv.DynamicMap(partial(color_tract_layout, butler=butler, tracts=tracts, scale=scale), kdims=['description', 'style'])
dmap = dmap.redim.values(description=descriptions, style=styles)
return dmap
def get_plot_filename(butler, tract, filt, description, style, visit=None, kind='coadd'):
dataId = {'tract':tract, 'filter':filt}
if visit is not None:
dataId.update({'visit':visit})
filenamer = Filenamer(butler, 'plot{}'.format(kind.capitalize()), dataId)
filename = filenamer(description=description, style=style, dataId=dataId)
return filename
def get_plot(butler, tract, filt, description, style, visit=None, kind='coadd', scale=None):
filename = get_plot_filename(butler, tract, filt, description, style, visit=visit, kind=kind)
try:
rgb = hv.RGB.load_image(filename, bare=True)
# back out the aspect ratio from bounds
l,b,r,t = rgb.bounds.lbrt()
aspect = (r-l)/(t-b)
h = 480
w = int(h * aspect)
rgb = rgb.opts(plot={'width':w, 'height':h})
if scale is not None:
rgb = rgb.opts(plot={'width':int(w*scale), 'height':int(h*scale)})
return rgb
except FileNotFoundError:
return hv.RGB(np.zeros((2,2))).opts(plot={'width':640, 'height':480})
def filter_layout(butler, tract=9813, description='mag_modelfit_CModel', style='psfMagHist',
visit=None, kind='coadd', scale=0.66, columns=3):
if tract==9813:
filters = cosmos_filters
else:
filters = wide_filters
return hv.Layout([get_plot(butler, tract, f, description, style, kind, scale=scale)
for f in filters]).cols(columns)
def description_layout(butler, descriptions, tract=9813, filt='HSC-I', style='psfMagHist',
visit=None, kind='coadd', scale=0.66, columns=3):
return hv.Layout([get_plot(butler, tract, filt, desc, style, visit=visit, kind=kind, scale=scale)
for desc in descriptions]).cols(columns)
def filter_layout_dmap_coadd(butler, descriptions, tracts=None,
styles=['psfMagHist', 'sky-stars', 'sky-gals'], scale=0.66):
if tracts is None:
tracts = get_tracts(butler)
if len(tracts) > 1:
layout_fn = partial(filter_layout, butler=butler, visit=None, kind='coadd', scale=scale)
values = dict(tract=tracts, description=descriptions, style=styles)
dmap = hv.DynamicMap(layout_fn, kdims=['tract', 'description', 'style'])
else:
layout_fn = partial(filter_layout, butler=butler, tract=tracts[0],
visit=None, kind='coadd', scale=scale)
values = dict(description=descriptions, style=styles)
dmap = hv.DynamicMap(layout_fn, kdims=['description', 'style'])
dmap = dmap.redim.values(**values)
return dmap
def description_layout_dmap_visit(butler, tract, descriptions, filt='HSC-I', styles=['psfMagHist', 'sky-stars', 'sky-gals'], scale=0.66):
# visits = get_visits(field_name(tract), filt)
if tract is None:
tract = get_tracts(butler)[0]
visits = get_visits(butler, tract, filt)
dmap = hv.DynamicMap(partial(description_layout, descriptions=descriptions, butler=butler, tract=tract, filt=filt, kind='visit', scale=scale),
kdims=['visit', 'style'])
dmap = dmap.redim.values(visit=visits, style=styles)
return dmap
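# Illustrative usage sketch (hypothetical butler setup; see the
# 'qa-image-browser.ipynb' demo for the real workflow):
#
#     butler = ...  # a data butler pointing at a pipe_analysis rerun
#     dmap = filter_layout_dmap_coadd(butler, descriptions=['mag_modelfit_CModel'])
#     dmap  # holoviews renders the DynamicMap when displayed in a notebook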
| timothydmorton/qa_explorer | explorer/static.py | Python | mit | 5,381 | [
"VisIt"
] | c6ded926eea19819b71e462c01f485d2faeee21646c63a0981cb623075af023f |
#!/usr/bin/python
"""
Last edited by: Siddhanathan Shanmugam <[email protected]>
Copyright (C) 2014 Siddhanathan Shanmugam
LICENSE: LGPL v2.1, or (at your option) any later version.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <[email protected]> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <[email protected]>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <[email protected]>
> or visit http://www.stetson.edu/~ProctoLogic/
"""
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from socketserver import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])
global FloatTypes
FloatTypes = [float]
global IntTypes
IntTypes = [int]
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
    >>> print(msg)
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
    Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address="", *args):
"""Instantiate a new OSCMessage.
The OSC-address can be specified with the 'address' argument.
The rest of the arguments are appended as data.
"""
self.clear(address)
if len(args)>0:
self.append(*args)
def setAddress(self, address):
"""Set or change the OSC-address
"""
self.address = address
def clear(self, address=""):
"""Clear (or set a new) OSC-address and clear any arguments appended so far
"""
self.address = address
self.clearData()
def clearData(self):
"""Clear any arguments appended so far
"""
self.typetags = ","
self.message = ""
def append(self, argument, typehint=None):
"""Appends data to the message, updating the typetags based on
the argument's type. If the argument is a blob (counted
string) pass in 'b' as typehint.
'argument' may also be a list or tuple, in which case its elements
will get appended one-by-one, all using the provided typehint
"""
if type(argument) == dict:
argument = argument.items()
elif isinstance(argument, OSCMessage):
raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
if hasattr(argument, '__iter__'):
for arg in argument:
self.append(arg, typehint)
return
if typehint == 'b':
binary = OSCBlob(argument)
tag = 'b'
elif typehint == 't':
binary = OSCTimeTag(argument)
tag = 't'
else:
tag, binary = OSCArgument(argument, typehint)
self.typetags += tag
self.message += binary.decode("latin-1")
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString(self.address)
binary += OSCString(self.typetags)
binary += self.message.encode("latin-1")
return binary
def __repr__(self):
"""Returns a string containing the decode Message
"""
return str(decodeOSC(self.getBinary()))
def __str__(self):
"""Returns the Message's address and contents as a string.
"""
return "%s %s" % (self.address, str(self.values()))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
"""Return (not self.__eq__(other))
"""
return not self.__eq__(other)
def __add__(self, values):
"""Returns a copy of self, with the contents of 'values' appended
(see the 'extend()' method, below)
"""
msg = self.copy()
msg.extend(values)
return msg
def __iadd__(self, values):
"""Appends the contents of 'values'
(equivalent to 'extend()', below)
Returns self
"""
self.extend(values)
return self
def __radd__(self, values):
"""Appends the contents of this OSCMessage to 'values'
Returns the extended 'values' (list or tuple)
"""
out = list(values)
out.extend(self.values())
        if isinstance(values, tuple):
return tuple(out)
return out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = self.values()
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in self.values())
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return self.values()[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = self.items()
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = values.items()
        elif isinstance(values, list):
items = []
for val in values:
                if isinstance(val, tuple):
items.append(val[:2])
else:
items.append((typehint, val))
        elif isinstance(values, tuple):
items = [values[:2]]
else:
items = [(typehint, values)]
return items
def __setitem__(self, i, val):
"""Set indicatated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = self.items()
new_items = self._buildItemList(val)
        if not isinstance(i, slice):
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = self.items()
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return self.values().count(val)
def index(self, val):
"""Returns the index of the first occurence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return self.values().index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = self.items() + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = self.items()
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = self.items()
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = self.items()
items.reverse()
self._reencode(items)
def remove(self, val):
"""Removes the first argument with the given value from the OSCMessage.
Raises ValueError if val isn't found.
"""
items = self.items()
# this is not very efficient...
i = 0
for (t, v) in items:
if (v == val):
break
i += 1
else:
raise ValueError("'%s' not in OSCMessage" % str(m))
# but more efficient than first calling self.values().index(val),
# then calling self.items(), which would in turn call self.values() again...
del items[i]
self._reencode(items)
def __iter__(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def __reversed__(self):
"""Returns a reverse iterator of the OSCMessage's arguments
"""
return reversed(self.values())
def itervalues(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def iteritems(self):
"""Returns an iterator of the OSCMessage's arguments as
(typetag, value) tuples
"""
return iter(self.items())
def itertags(self):
"""Returns an iterator of the OSCMessage's arguments' typetags
"""
return iter(self.tags())
class OSCBundle(OSCMessage):
"""Builds a 'bundle' of OSC messages.
OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
(And yes, OSC-bundles may contain other OSC-bundles...)
OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
- if an item or items to be appended or inserted are not OSCMessage objects,
OSCMessage objectss are created to encapsulate the item(s)
- an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
- OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
The default timetag value (0) means 'immediately'
"""
def __init__(self, address="", time=0):
"""Instantiate a new OSCBundle.
The default OSC-address for newly created OSCMessages
can be specified with the 'address' argument
The bundle's timetag can be set with the 'time' argument
"""
super(OSCBundle, self).__init__(address)
self.timetag = time
def __str__(self):
"""Returns the Bundle's contents (and timetag, if nonzero) as a string.
"""
if (self.timetag > 0.):
out = "#bundle (%s) [" % self.getTimeTagStr()
else:
out = "#bundle ["
if self.__len__():
for val in self.values():
out += "%s, " % str(val)
out = out[:-2] # strip trailing space and comma
return out + "]"
def setTimeTag(self, time):
"""Set or change the OSCBundle's TimeTag
In 'Python Time', that's floating seconds since the Epoch
"""
if time >= 0:
self.timetag = time
def getTimeTagStr(self):
"""Return the TimeTag as a human-readable string
"""
fract, secs = math.modf(self.timetag)
out = time.ctime(secs)[11:19]
out += ("%.3f" % fract)[1:]
return out
def append(self, argument, typehint = None):
"""Appends data to the bundle, creating an OSCMessage to encapsulate
the provided argument unless this is already an OSCMessage.
Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
        If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
- if 'addr' appears in the dict, its value overrides the OSCBundle's address
- if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
"""
if isinstance(argument, OSCMessage):
binary = OSCBlob(argument.getBinary())
else:
msg = OSCMessage(self.address)
            if type(argument) == dict:
if 'addr' in argument:
msg.setAddress(argument['addr'])
if 'args' in argument:
msg.append(argument['args'], typehint)
else:
msg.append(argument, typehint)
binary = OSCBlob(msg.getBinary())
        self.message += binary.decode("latin-1")
self.typetags += 'b'
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString("#bundle")
binary += OSCTimeTag(self.timetag)
        binary += self.message.encode("latin-1")
return binary
def _reencapsulate(self, decoded):
if decoded[0] == "#bundle":
msg = OSCBundle()
msg.setTimeTag(decoded[1])
for submsg in decoded[2:]:
msg.append(self._reencapsulate(submsg))
else:
msg = OSCMessage(decoded[0])
tags = decoded[1].lstrip(',')
for i in range(len(tags)):
msg.append(decoded[2+i], tags[i])
return msg
def values(self):
"""Returns a list of the OSCMessages appended so far
"""
out = []
for decoded in decodeOSC(self.getBinary())[2:]:
out.append(self._reencapsulate(decoded))
return out
def __eq__(self, other):
"""Return True if two OSCBundles have the same timetag & content
"""
if not isinstance(other, self.__class__):
return False
return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
def copy(self):
"""Returns a deep copy of this OSCBundle
"""
copy = super(OSCBundle, self).copy()
copy.timetag = self.timetag
return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
"""Convert a string into a zero-padded OSC String.
The length of the resulting string is always a multiple of 4 bytes.
The string ends with 1 to 4 zero-bytes ('\x00')
"""
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
return struct.pack(">%ds" % (OSCstringLength), bytes(str(next), "utf-8"))
def OSCBlob(next):
"""Convert a string into an OSC Blob.
An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
    The size is always a multiple of 4 bytes.
The blob ends with 0 to 3 zero-bytes ('\x00')
"""
    if isinstance(next, (str, bytes)):
OSCblobLength = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
else:
binary = ""
return binary
def OSCArgument(next, typehint=None):
""" Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple.
"""
if not typehint:
if type(next) in FloatTypes:
binary = struct.pack(">f", float(next))
tag = 'f'
elif type(next) in IntTypes:
binary = struct.pack(">i", int(next))
tag = 'i'
else:
binary = OSCString(next)
tag = 's'
elif typehint == 'd':
try:
binary = struct.pack(">d", float(next))
tag = 'd'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'f':
try:
binary = struct.pack(">f", float(next))
tag = 'f'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'i':
try:
binary = struct.pack(">i", int(next))
tag = 'i'
except ValueError:
binary = OSCString(next)
tag = 's'
else:
binary = OSCString(next)
tag = 's'
return (tag, binary)
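# Illustrative examples (not part of the original library): OSCArgument()
# derives the typetag from the Python type when no typehint is given, e.g.
#
#     OSCArgument(1.5)  ->  ('f', b'?\xc0\x00\x00')        # big-endian float32
#     OSCArgument(3)    ->  ('i', b'\x00\x00\x00\x03')     # big-endian int32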
def OSCTimeTag(time):
"""Convert a time in floating seconds to its
OSC binary representation
"""
if time > 0:
fract, secs = math.modf(time)
secs = secs - NTP_epoch
        binary = struct.pack('>LL', int(secs), int(fract * NTP_units_per_second))
else:
binary = struct.pack('>LL', 0, 1)
return binary
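# Illustrative example (not part of the original library): a timetag is two
# big-endian 32-bit words, seconds since 1900-01-01 plus a binary fraction;
# the special encoding produced for time <= 0 means "process immediately":
#
#     OSCTimeTag(0)  ->  b'\x00\x00\x00\x00\x00\x00\x00\x01'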
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
    length = data.find("\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print("Error: too few bytes for int", data, len(data))
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
    big = (int(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(NTP_epoch + high) + float(low / NTP_units_per_second)
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print("Error: too few bytes for float", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print("Error: too few bytes for double", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
"""Converts a binary OSC message to a Python list.
"""
table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
decoded = []
address, rest = _readString(data)
if address.startswith(","):
typetags = address
address = ""
else:
typetags = ""
if address == "#bundle":
time, rest = _readTimeTag(rest)
decoded.append(address)
decoded.append(time)
while len(rest)>0:
length, rest = _readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest)>0:
if not len(typetags):
typetags, rest = _readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags.startswith(","):
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
raise OSCError("OSCMessage's typetag-string lacks the magic ','")
return decoded
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print("byte 0 1 2 3 4 5 6 7 8 9 A B C D E F")
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % ord(bytes[i])
if (i+1) % 16 == 0:
print("%s: %s" % (line, repr(bytes[i-15:i+1])))
line = ""
bytes_left = num % 16
if bytes_left:
print("%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:])))
def getUrlStr(*args):
"""Convert provided arguments to a string in 'host:port/prefix' format
Args can be:
- (host, port)
- (host, port), prefix
- host, port
- host, port, prefix
"""
if not len(args):
return ""
    if isinstance(args[0], tuple):
host = args[0][0]
port = args[0][1]
args = args[1:]
else:
host = args[0]
port = args[1]
args = args[2:]
if len(args):
prefix = args[0]
else:
prefix = ""
if len(host) and (host != '0.0.0.0'):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
else:
host = 'localhost'
if type(port) == int:
return "%s:%d%s" % (host, port, prefix)
else:
return host + prefix
def parseUrlStr(url):
"""Convert provided string in 'host:port/prefix' format to it's components
Returns ((host, port), prefix)
"""
    if not (isinstance(url, str) and len(url)):
return (None, '')
i = url.find("://")
if i > -1:
url = url[i+3:]
i = url.find(':')
if i > -1:
host = url[:i].strip()
tail = url[i+1:].strip()
else:
host = ''
tail = url
for i in range(len(tail)):
if not tail[i].isdigit():
break
else:
i += 1
portstr = tail[:i].strip()
tail = tail[i:].strip()
found = len(tail)
for c in ('/', '+', '-', '*'):
i = tail.find(c)
if (i > -1) and (i < found):
found = i
head = tail[:found].strip()
prefix = tail[found:].strip()
prefix = prefix.strip('/')
if len(prefix) and prefix[0] not in ('+', '-', '*'):
prefix = '/' + prefix
if len(head) and not len(host):
host = head
if len(host):
try:
host = socket.gethostbyname(host)
except socket.error:
pass
try:
port = int(portstr)
except ValueError:
port = None
return ((host, port), prefix)
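# Illustrative example (not part of the original library): parseUrlStr()
# splits a 'host:port/prefix' string into its components; the resolved host
# depends on the local resolver, e.g.
#
#     parseUrlStr("localhost:9000/synth")  ->  (('127.0.0.1', 9000), '/synth')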
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
self.setServer(server)
self.client_address = None
def _setSocket(self, skt):
"""Set and configure client socket"""
if self.socket != None:
self.close()
self.socket = skt
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
def _ensureConnected(self, address):
"""Make sure client has a socket connected to address"""
if not self.socket:
if len(address) == 4:
address_family = socket.AF_INET6
else:
address_family = socket.AF_INET
self._setSocket(socket.socket(address_family, socket.SOCK_DGRAM))
self.socket.connect(address)
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if server == None:
if hasattr(self,'server') and self.server:
if self.server.client != self:
raise OSCClientError("Internal inconsistency")
self.server.client.close()
self.server.client = None
self.server = None
return
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
self._setSocket(server.socket.dup())
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
if self.socket and other.socket:
            sockEqual = (self.socket == other.socket)
else:
sockEqual = (self.socket == None and other.socket == None)
if not sockEqual:
return False
if self.server and other.server:
            return self.server == other.server
else:
return self.server == None and other.server == None
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
if self.socket:
return self.socket.getpeername()
else:
return None
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self._ensureConnected(address)
self.client_address = address
except socket.error as e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
        - address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self._ensureConnected(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error as e:
if e in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
if not self.socket:
raise OSCClientError("Called send() on non-connected client")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error as e:
if e in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
"""Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
{ '<addr>':True, '<addr>':False, ... }
Returns a list: ['<prefix>', filters]
"""
out = {}
    if isinstance(args, str):
args = [args]
prefix = None
for arg in args:
head = None
for plus in arg.split('+'):
minus = plus.split('-')
plusfs = minus.pop(0).strip()
if len(plusfs):
plusfs = '/' + plusfs.strip('/')
if (head == None) and (plusfs != "/*"):
head = plusfs
elif len(plusfs):
if plusfs == '/*':
out = { '/*':True } # reset all previous filters
else:
out[plusfs] = True
for minusfs in minus:
minusfs = minusfs.strip()
if len(minusfs):
minusfs = '/' + minusfs.strip('/')
if minusfs == '/*':
out = { '/*':False } # reset all previous filters
else:
out[minusfs] = False
if prefix == None:
prefix = head
return [prefix, out]
def getFilterStr(filters):
"""Return the given 'filters' dict as a list of
'+<addr>' | '-<addr>' filter-strings
"""
if not len(filters):
return []
if '/*' in filters.keys():
if filters['/*']:
out = ["+/*"]
else:
out = ["-/*"]
else:
if False in filters.values():
out = ["+/*"]
else:
out = ["-/*"]
for (addr, bool) in filters.items():
if addr == '/*':
continue
if bool:
out.append("+%s" % addr)
else:
out.append("-%s" % addr)
return out
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = str.maketrans("{,}?","(|).")
def getRegEx(pattern):
"""Compiles and returns a 'regular expression' object for the given address-pattern.
"""
# Translate OSC-address syntax to python 're' syntax
pattern = pattern.replace(".", r"\.") # first, escape all '.'s in the pattern.
pattern = pattern.replace("(", r"\(") # escape all '('s.
pattern = pattern.replace(")", r"\)") # escape all ')'s.
pattern = pattern.replace("*", r".*") # replace a '*' by '.*' (match 0 or more characters)
pattern = pattern.translate(OSCtrans) # change '?' to '.' and '{,}' to '(|)'
return re.compile(pattern)
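# Illustrative sketch (not part of the original library; the helper name is
# hypothetical): getRegEx() lets '*' span several characters and '?' match a
# single one, so OSC address patterns can be tested directly:
def _matches_osc_pattern_sketch(pattern, address):
    """Return True if 'address' matches the OSC address 'pattern'."""
    return getRegEx(pattern).match(address) is not None
# e.g. _matches_osc_pattern_sketch("/synth/*/freq", "/synth/osc1/freq") is True
# and  _matches_osc_pattern_sketch("/mix/ch?", "/mix/ch2") is True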
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in self.targets.keys():
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in src.keys(): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in src.items():
if (addr in dst.keys()) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in self.targets.keys():
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if type(filters) in types.StringTypes:
(_, filters) = parseFilterStr(filters)
elif type(filters) != dict:
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
elif (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
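	# Hedged illustration (not part of the original module): in the documented
	# format, a URL-string for setOSCTargetFromStr() could look like
	#	"192.168.1.10:9000/synth +/volume -/debug"
	# i.e. host, port, an optional OSC-address prefix and optional '+'/'-'
	# message-filters, as parsed by parseUrlStr() and parseFilterStr().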
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in self.targets.keys():
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in self.targets.items():
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in self.targets.keys()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in dict.items():
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in self.targets.items():
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
		- address: (host, port) tuple specifying the remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message.
or
Returns a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = out.values()
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in filters.keys():
if filters['/*']:
out = msg
else:
out = None
elif False in filters.values():
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in filters.keys():
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
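	# Hedged illustration (not part of the original module) of the filter
	# semantics implemented by _filterMessage() above, assuming exact addresses:
	#	filters = {'/volume': True}              -> only '/volume' messages are sent
	#	filters = {'/*': True, '/debug': False}  -> everything except '/debug' is sent
	# OSCBundles are filtered recursively, member by member.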
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
		the message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = out.values()
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in self.targets.items():
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, msg)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error as e:
				if e.errno in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
def __init__(self):
self.callbacks = {}
def addMsgHandler(self, address, callback):
"""Register a handler for an OSC-address
- 'address' is the OSC address-string.
the address-string should start with '/' and may not contain '*'
- 'callback' is the function called for incoming OSCMessages that match 'address'.
The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
"""
for chk in '*?,[]{}# ':
if chk in address:
raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
if type(callback) not in (types.FunctionType, types.MethodType):
raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
if address != 'default':
address = '/' + address.strip('/')
self.callbacks[address] = callback
def delMsgHandler(self, address):
"""Remove the registered handler for the given OSC-address
"""
del self.callbacks[address]
def getOSCAddressSpace(self):
"""Returns a list containing all OSC-addresses registerd with this Server.
"""
return self.callbacks.keys()
def dispatchMessage(self, pattern, tags, data, client_address):
"""Attmept to match the given OSC-address pattern, which may contain '*',
against all callbacks registered with the OSCServer.
Calls the matching callback and returns whatever it returns.
If no match is found, and a 'default' callback is registered, it calls that one,
or raises NoCallbackError if a 'default' callback is not registered.
		- pattern (string): The OSC-address of the received message
		- tags (string): The OSC-typetags of the received message's arguments, without ','
- data (list): The message arguments
"""
if len(tags) != len(data):
raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
expr = getRegEx(pattern)
replies = []
matched = 0
for addr in self.callbacks.keys():
match = expr.match(addr)
if match and (match.end() == len(addr)):
reply = self.callbacks[addr](pattern, tags, data, client_address)
matched += 1
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))
if matched == 0:
if 'default' in self.callbacks:
reply = self.callbacks['default'](pattern, tags, data, client_address)
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
else:
raise NoCallbackError(pattern)
return replies
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
"""RequestHandler class for the OSCServer
"""
def setup(self):
"""Prepare RequestHandler.
Unpacks request as (packet, source socket address)
Creates an empty list for replies.
"""
(self.packet, self.socket) = self.request
self.replies = []
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def handle(self):
"""Handle incoming OSCMessage
"""
decoded = decodeOSC(self.packet)
if not len(decoded):
return
self._unbundle(decoded)
def finish(self):
"""Finish handling OSCMessage.
Send any reply returned by the callback(s) back to the originating client
as an OSCMessage or OSCBundle
"""
if self.server.return_port:
self.client_address = (self.client_address[0], self.server.return_port)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
return
self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
"""Multi-threaded OSCRequestHandler;
Starts a new RequestHandler thread for each unbundled OSCMessage
"""
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function
This version starts a new thread for each sub-Bundle found in the Bundle,
then waits for all its children to finish.
"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
now = time.time()
children = []
for msg in decoded[2:]:
t = threading.Thread(target = self._unbundle, args = (msg,))
t.start()
children.append(t)
# wait for all children to terminate
for t in children:
t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
	The OSC address-pattern is matched against a set of OSC-addresses
	that have been registered to the server with a callback-function.
	If the address-pattern of the message matches the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.setServer(self)
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
		return self.socket == other.socket
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix' -> serverInfo_handler
- '<prefix><error_prefix> -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
		All registered handlers must accept these four arguments:
		- addr (string): The OSC-address pattern of the received Message
		(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
		All registered handlers must accept these four arguments:
		- addr (string): The OSC-address pattern of the received Message
		(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
		self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
		All registered handlers must accept these four arguments:
		- addr (string): The OSC-address pattern of the received Message
		(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in self.callbacks.keys():
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
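	# Hedged illustration (not part of the original module): a remote client can
	# query this handler by sending an OSCMessage to '/info' with the single
	# string argument 'ls' (or 'list'); the reply is an OSCBundle of
	# ('address', <registered-address>) messages describing the address space.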
def _subscribe(self, data, client_address):
"""Handle the actual subscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
parseUrlStr() & parseFilterStr() to actually retreive <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == int) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
def _unsubscribe(self, data, client_address):
"""Handle the actual unsubscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>]' string, which is then passed to
parseUrlStr() to actually retreive <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == int) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError as e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
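# Hedged illustration (not part of the original module): with the default
# handlers installed (see addDefaultHandlers()), a remote host can subscribe
# itself by sending an OSCMessage to '/subscribe' whose arguments spell out
# "<host>:<port>[<prefix>] [<filter>] ...", and unsubscribe again via
# '/unsubscribe'.  A successful subscription is answered with a
# ('target', 'osc://<host>:<port>[<prefix>]', ...) reply message.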
class ForkingOSCServer(ForkingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server forks a new process to handle each incoming request.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server starts a new thread to handle each incoming request.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = ThreadingOSCRequestHandler
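# Hedged end-to-end sketch (illustration only; not part of the original pyOSC
# module, and never executed on import).  The host, port and '/print' address
# are arbitrary example values, and OSCClient.connect()/close() are assumed to
# behave as they are used elsewhere in this module.
def _example_udp_roundtrip():
	"""Start a ThreadingOSCServer, send it one message, then shut down."""
	server = ThreadingOSCServer(("127.0.0.1", 9000))
	server.addDefaultHandlers()             # registers '/print', '/info', ...
	server_thread = threading.Thread(target=server.serve_forever)
	server_thread.start()
	client = OSCClient()
	client.connect(("127.0.0.1", 9000))
	msg = OSCMessage("/print")
	msg.append("hello from the example client")
	client.send(msg)                        # handled by msgPrinter_handler
	client.close()
	server.close()
	server_thread.join()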
######
#
# OSCError classes
#
######
class OSCError(Exception):
"""Base Class for all OSC-related errors
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class OSCClientError(OSCError):
"""Class for all OSCClient errors
"""
pass
class OSCServerError(OSCError):
"""Class for all OSCServer errors
"""
pass
class NoCallbackError(OSCServerError):
"""This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
is received, and no 'default' handler is registered.
"""
def __init__(self, pattern):
"""The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
"""
self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
"""This error is raised (by an OSCMultiClient) when an attempt is made to unsubscribe a host
that isn't subscribed.
"""
def __init__(self, addr, prefix=None):
if prefix:
url = getUrlStr(addr, prefix)
else:
url = getUrlStr(addr, '')
self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
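# Hedged sketch (illustration only; not part of the original pyOSC module) of
# the int32 length-prefix framing described in the note above.  It mirrors what
# _transmitMsg() / _receiveMsg() below do with struct.pack(">L", ...).
def _example_frame_packet(packet_bytes):
	"""Prepend the 4-byte big-endian size, as required for stream transports."""
	return struct.pack(">L", len(packet_bytes)) + packet_bytes
def _example_unframe_packet(stream_bytes):
	"""Split one length-prefixed packet off the front of a byte string."""
	(size,) = struct.unpack(">L", stream_bytes[:4])
	return (stream_bytes[4:4 + size], stream_bytes[4 + size:])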
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
""" This is the central class of a streaming OSC server. If a client
connects to the server, the server instantiates a OSCStreamRequestHandler
for each new connection. This is fundamentally different to a packet
oriented server which has a single address space for all connections.
This connection based (streaming) OSC server maintains an address space
	for each single connection, because TCP servers usually spawn a new thread
or process for each new connection. This would generate severe
multithreading synchronization problems when each thread would operate on
the same address space object. Therefore: To implement a streaming/TCP OSC
server a custom handler must be implemented which implements the
setupAddressSpace member in which it creates its own address space for this
very connection. This has been done within the testbench and can serve as
inspiration.
"""
def __init__(self, request, client_address, server):
""" Initialize all base classes. The address space must be initialized
before the stream request handler because the initialization function
of the stream request handler calls the setup member which again
requires an already initialized address space.
"""
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
StreamRequestHandler.__init__(self, request, client_address, server)
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def setup(self):
StreamRequestHandler.setup(self)
print("SERVER: New client connection.")
self.setupAddressSpace()
self.server._clientRegister(self)
def setupAddressSpace(self):
""" Override this function to customize your address space. """
pass
def finish(self):
StreamRequestHandler.finish(self)
self.server._clientUnregister(self)
print("SERVER: Client connection handled.")
def _transmit(self, data):
sent = 0
while sent < len(data):
tmp = self.connection.send(data[sent:])
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsg(self, msg):
"""Send an OSC message over a streaming socket. Raises exception if it
should fail. If everything is transmitted properly, True is returned. If
socket has been closed, False.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
try:
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmit(len_big_endian) and self._transmit(binary):
return True
return False
except socket.error as e:
			if e.errno == errno.EPIPE: # broken pipe
return False
raise e
def _receive(self, count):
""" Receive a certain amount of data from the socket and return it. If the
remote end should be closed in the meanwhile None is returned.
"""
chunk = self.connection.recv(count)
if not chunk or len(chunk) == 0:
return None
while len(chunk) < count:
tmp = self.connection.recv(count - len(chunk))
if not tmp or len(tmp) == 0:
return None
chunk = chunk + tmp
return chunk
def _receiveMsg(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receive(4)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receive(slen)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# decode OSC data and dispatch
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("SERVER: Message decoding failed.")
return msg
def handle(self):
"""
Handle a connection.
"""
# set socket blocking to avoid "resource currently not available"
# exceptions, because the connection socket inherits the settings
# from the listening socket and this times out from time to time
# in order to provide a way to shut the server down. But we want
# clean and blocking behaviour here
self.connection.settimeout(None)
print("SERVER: Entered server loop")
try:
while True:
decoded = self._receiveMsg()
if decoded == None:
return
elif len(decoded) <= 0:
# if message decoding fails we try to stay in sync but print a message
print("OSC stream server: Spurious message received.")
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
# no replies, continue receiving
continue
self._txMutex.acquire()
txOk = self._transmitMsg(msg)
self._txMutex.release()
if not txOk:
break
except socket.error as e:
			if e.errno == errno.ECONNRESET:
# if connection has been reset by client, we do not care much
				# about it, we just assume our duty fulfilled
print("SERVER: Connection has been reset by peer.")
else:
raise e
def sendOSC(self, oscData):
""" This member can be used to transmit OSC messages or OSC bundles
		over the client/server connection. It is thread-safe.
"""
self._txMutex.acquire()
result = self._transmitMsg(oscData)
self._txMutex.release()
return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so called processing queue in which
all pending messages or subbundles are inserted to be processed in the future).
When a subbundle or message gets queued, a mechanism must be provided that
those messages get invoked when time asks for them. There are the following
opportunities:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
""" A connection oriented (TCP/IP) OSC server.
"""
# define a socket timeout, so the serve_forever loop can actually exit.
# with 2.6 and server.shutdown this wouldn't be necessary
socket_timeout = 1
# this is the class which handles a new connection. Override this for a
# useful customized server. See the testbench for an example
RequestHandlerClass = OSCStreamRequestHandler
def __init__(self, address):
"""Instantiate an OSCStreamingServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens for new connections.
"""
self._clientList = []
self._clientListMutex = threading.Lock()
TCPServer.__init__(self, address, self.RequestHandlerClass)
self.socket.settimeout(self.socket_timeout)
def serve_forever(self):
"""Handle one request at a time until server is closed.
Had to add this since 2.5 does not support server.shutdown()
"""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def start(self):
""" Start the server thread. """
self._server_thread = threading.Thread(target=self.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
def stop(self):
""" Stop the server thread and close the socket. """
self.running = False
self._server_thread.join()
self.server_close()
# 2.6 only
#self.shutdown()
def _clientRegister(self, client):
""" Gets called by each request/connection handler when connection is
established to add itself to the client list
"""
self._clientListMutex.acquire()
self._clientList.append(client)
self._clientListMutex.release()
def _clientUnregister(self, client):
""" Gets called by each request/connection handler when connection is
lost to remove itself from the client list
"""
self._clientListMutex.acquire()
self._clientList.remove(client)
self._clientListMutex.release()
def broadcastToClients(self, oscData):
""" Send OSC message or bundle to all connected clients. """
result = True
for client in self._clientList:
result = result and client.sendOSC(oscData)
return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
	""" Implements a server which spawns a separate thread for each incoming
	connection. Care must be taken since the OSC address space is for all
	the same.
	"""
	pass
class OSCStreamingClient(OSCAddressSpace):
""" OSC streaming client.
A streaming client establishes a connection to a streaming server but must
be able to handle replies by the server as well. To accomplish this the
receiving takes place in a secondary thread, because no one knows if we
have to expect a reply or not, i.e. synchronous architecture doesn't make
much sense.
Replies will be matched against the local address space. If message
handlers access code of the main thread (where the client messages are sent
	to the server) care must be taken e.g. by installing synchronization
mechanisms or by using an event dispatcher which can handle events
originating from other threads.
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
rcvbuf_size = 4096 * 8
def __init__(self):
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
self.socket.settimeout(1.0)
self._running = False
def _receiveWithTimeout(self, count):
chunk = str()
while len(chunk) < count:
try:
tmp = self.socket.recv(count - len(chunk))
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return None
else:
continue
except socket.error as e:
				if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return None
else:
raise e
if not tmp or len(tmp) == 0:
print("CLIENT: Socket has been closed.")
return None
chunk = chunk + tmp
return chunk
def _receiveMsgWithTimeout(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receiveWithTimeout(4)
if not chunk:
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receiveWithTimeout(slen)
if not chunk:
return None
# decode OSC content
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("CLIENT: Message decoding failed.")
return msg
def _receiving_thread_entry(self):
print("CLIENT: Entered receiving thread.")
self._running = True
while self._running:
decoded = self._receiveMsgWithTimeout()
if not decoded:
break
elif len(decoded) <= 0:
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
continue
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
if not txOk:
break
print("CLIENT: Receiving thread terminated.")
def _unbundle(self, decoded):
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def connect(self, address):
self.socket.connect(address)
self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
self.receiving_thread.start()
def close(self):
# let socket time out
self._running = False
self.receiving_thread.join()
self.socket.close()
def _transmitWithTimeout(self, data):
sent = 0
while sent < len(data):
try:
tmp = self.socket.send(data[sent:])
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return False
else:
continue
except socket.error as e:
				if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return False
else:
raise e
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsgWithTimeout(self, msg):
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
return True
else:
return False
def sendOSC(self, msg):
"""Send an OSC message or bundle to the server. Returns True on success.
"""
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
return txOk
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.socket.getpeername()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
isequal = cmp(self.socket._sock, other.socket._sock)
if isequal and self.server and other.server:
return cmp(self.server, other.server)
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
# vim:noexpandtab
| siddhanathan/raga | OSC.py | Python | gpl-3.0 | 84,732 | [
"VisIt"
] | 5928ab965a996ccfc473e336a9c497f7b3496087eb1998afc25f1408050d4fb8 |
# proxy module
from __future__ import absolute_import
from mayavi.plugins.envisage_engine import *
| enthought/etsproxy | enthought/mayavi/plugins/envisage_engine.py | Python | bsd-3-clause | 99 | [
"Mayavi"
] | 19362411c761ec23be6bd67844ec12dfcaf968b0ae1d8928e32058dfa54f3812 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Base types for nodes in a GRIT resource tree.
'''
import ast
import os
import types
from xml.sax import saxutils
from grit import clique
from grit import exception
from grit import util
class Node(object):
'''An item in the tree that has children.'''
# Valid content types that can be returned by _ContentType()
_CONTENT_TYPE_NONE = 0 # No CDATA content but may have children
_CONTENT_TYPE_CDATA = 1 # Only CDATA, no children.
_CONTENT_TYPE_MIXED = 2 # CDATA and children, possibly intermingled
  # By default, nodes are not whitelist-marked-as-skip.
_whitelist_marked_as_skip = False
# A class-static cache to speed up EvaluateExpression().
# Keys are expressions (e.g. 'is_ios and lang == "fr"'). Values are tuples
# (code, variables_in_expr) where code is the compiled expression and can be
# directly eval'd, and variables_in_expr is the list of variable and method
# names used in the expression (e.g. ['is_ios', 'lang']).
eval_expr_cache = {}
def __init__(self):
self.children = [] # A list of child elements
self.mixed_content = [] # A list of u'' and/or child elements (this
# duplicates 'children' but
# is needed to preserve markup-type content).
self.name = u'' # The name of this element
self.attrs = {} # The set of attributes (keys to values)
self.parent = None # Our parent unless we are the root element.
self.uberclique = None # Allows overriding uberclique for parts of tree
# This context handler allows you to write "with node:" and get a
# line identifying the offending node if an exception escapes from the body
# of the with statement.
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
print u'Error processing node %s' % unicode(self)
def __iter__(self):
'''A preorder iteration through the tree that this node is the root of.'''
return self.Preorder()
def Preorder(self):
'''Generator that generates first this node, then the same generator for
any child nodes.'''
yield self
for child in self.children:
for iterchild in child.Preorder():
yield iterchild
def ActiveChildren(self):
'''Returns the children of this node that should be included in the current
configuration. Overridden by <if>.'''
return [node for node in self.children if not node.WhitelistMarkedAsSkip()]
def ActiveDescendants(self):
'''Yields the current node and all descendants that should be included in
the current configuration, in preorder.'''
yield self
for child in self.ActiveChildren():
for descendant in child.ActiveDescendants():
yield descendant
def GetRoot(self):
'''Returns the root Node in the tree this Node belongs to.'''
curr = self
while curr.parent:
curr = curr.parent
return curr
# TODO(joi) Use this (currently untested) optimization?:
#if hasattr(self, '_root'):
# return self._root
#curr = self
#while curr.parent and not hasattr(curr, '_root'):
# curr = curr.parent
#if curr.parent:
# self._root = curr._root
#else:
# self._root = curr
#return self._root
def StartParsing(self, name, parent):
'''Called at the start of parsing.
Args:
name: u'elementname'
parent: grit.node.base.Node or subclass or None
'''
assert isinstance(name, types.StringTypes)
assert not parent or isinstance(parent, Node)
self.name = name
self.parent = parent
def AddChild(self, child):
'''Adds a child to the list of children of this node, if it is a valid
child for the node.'''
assert isinstance(child, Node)
if (not self._IsValidChild(child) or
self._ContentType() == self._CONTENT_TYPE_CDATA):
explanation = 'invalid child %s for parent %s' % (str(child), self.name)
raise exception.UnexpectedChild(explanation)
self.children.append(child)
self.mixed_content.append(child)
def RemoveChild(self, child_id):
'''Removes the first node that has a "name" attribute which
matches "child_id" in the list of immediate children of
this node.
Args:
child_id: String identifying the child to be removed
'''
index = 0
# Safe not to copy since we only remove the first element found
for child in self.children:
name_attr = child.attrs['name']
if name_attr == child_id:
self.children.pop(index)
self.mixed_content.pop(index)
break
index += 1
def AppendContent(self, content):
'''Appends a chunk of text as content of this node.
Args:
content: u'hello'
Return:
None
'''
assert isinstance(content, types.StringTypes)
if self._ContentType() != self._CONTENT_TYPE_NONE:
self.mixed_content.append(content)
elif content.strip() != '':
raise exception.UnexpectedContent()
def HandleAttribute(self, attrib, value):
'''Informs the node of an attribute that was parsed out of the GRD file
for it.
Args:
attrib: 'name'
value: 'fooblat'
Return:
None
'''
assert isinstance(attrib, types.StringTypes)
assert isinstance(value, types.StringTypes)
if self._IsValidAttribute(attrib, value):
self.attrs[attrib] = value
else:
raise exception.UnexpectedAttribute(attrib)
def EndParsing(self):
'''Called at the end of parsing.'''
# TODO(joi) Rewrite this, it's extremely ugly!
if len(self.mixed_content):
if isinstance(self.mixed_content[0], types.StringTypes):
# Remove leading and trailing chunks of pure whitespace.
while (len(self.mixed_content) and
isinstance(self.mixed_content[0], types.StringTypes) and
self.mixed_content[0].strip() == ''):
self.mixed_content = self.mixed_content[1:]
# Strip leading and trailing whitespace from mixed content chunks
# at front and back.
if (len(self.mixed_content) and
isinstance(self.mixed_content[0], types.StringTypes)):
self.mixed_content[0] = self.mixed_content[0].lstrip()
# Remove leading and trailing ''' (used to demarcate whitespace)
if (len(self.mixed_content) and
isinstance(self.mixed_content[0], types.StringTypes)):
if self.mixed_content[0].startswith("'''"):
self.mixed_content[0] = self.mixed_content[0][3:]
if len(self.mixed_content):
if isinstance(self.mixed_content[-1], types.StringTypes):
# Same stuff all over again for the tail end.
while (len(self.mixed_content) and
isinstance(self.mixed_content[-1], types.StringTypes) and
self.mixed_content[-1].strip() == ''):
self.mixed_content = self.mixed_content[:-1]
if (len(self.mixed_content) and
isinstance(self.mixed_content[-1], types.StringTypes)):
self.mixed_content[-1] = self.mixed_content[-1].rstrip()
if (len(self.mixed_content) and
isinstance(self.mixed_content[-1], types.StringTypes)):
if self.mixed_content[-1].endswith("'''"):
self.mixed_content[-1] = self.mixed_content[-1][:-3]
# Check that all mandatory attributes are there.
for node_mandatt in self.MandatoryAttributes():
mandatt_list = []
if node_mandatt.find('|') >= 0:
mandatt_list = node_mandatt.split('|')
else:
mandatt_list.append(node_mandatt)
mandatt_option_found = False
for mandatt in mandatt_list:
assert mandatt not in self.DefaultAttributes().keys()
if mandatt in self.attrs:
if not mandatt_option_found:
mandatt_option_found = True
else:
raise exception.MutuallyExclusiveMandatoryAttribute(mandatt)
if not mandatt_option_found:
raise exception.MissingMandatoryAttribute(mandatt)
# Add default attributes if not specified in input file.
for defattr in self.DefaultAttributes():
if not defattr in self.attrs:
self.attrs[defattr] = self.DefaultAttributes()[defattr]
def GetCdata(self):
'''Returns all CDATA of this element, concatenated into a single
string. Note that this ignores any elements embedded in CDATA.'''
return ''.join([c for c in self.mixed_content
if isinstance(c, types.StringTypes)])
def __unicode__(self):
'''Returns this node and all nodes below it as an XML document in a Unicode
string.'''
header = u'<?xml version="1.0" encoding="UTF-8"?>\n'
return header + self.FormatXml()
def FormatXml(self, indent = u'', one_line = False):
'''Returns this node and all nodes below it as an XML
element in a Unicode string. This differs from __unicode__ in that it does
not include the <?xml> stuff at the top of the string. If one_line is true,
    children and CDATA are laid out in a way that preserves internal
whitespace.
'''
assert isinstance(indent, types.StringTypes)
content_one_line = (one_line or
self._ContentType() == self._CONTENT_TYPE_MIXED)
inside_content = self.ContentsAsXml(indent, content_one_line)
# Then the attributes for this node.
attribs = u''
default_attribs = self.DefaultAttributes()
for attrib, value in sorted(self.attrs.items()):
# Only print an attribute if it is other than the default value.
if attrib not in default_attribs or value != default_attribs[attrib]:
attribs += u' %s=%s' % (attrib, saxutils.quoteattr(value))
# Finally build the XML for our node and return it
if len(inside_content) > 0:
if one_line:
return u'<%s%s>%s</%s>' % (self.name, attribs, inside_content, self.name)
elif content_one_line:
return u'%s<%s%s>\n%s %s\n%s</%s>' % (
indent, self.name, attribs,
indent, inside_content,
indent, self.name)
else:
return u'%s<%s%s>\n%s\n%s</%s>' % (
indent, self.name, attribs,
inside_content,
indent, self.name)
else:
return u'%s<%s%s />' % (indent, self.name, attribs)
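  # Illustrative sketch (editor's addition, hypothetical values, not from the
  # upstream source): for a childless node named 'include' whose attrs are
  # {'file': 'foo.html', 'name': 'IDR_FOO'}, FormatXml() returns roughly
  #
  #   u'<include file="foo.html" name="IDR_FOO" />'
  #
  # since attributes are emitted in sorted order and an element with no
  # content uses the self-closing form.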
def ContentsAsXml(self, indent, one_line):
'''Returns the contents of this node (CDATA and child elements) in XML
format. If 'one_line' is true, the content will be laid out on one line.'''
assert isinstance(indent, types.StringTypes)
# Build the contents of the element.
inside_parts = []
last_item = None
for mixed_item in self.mixed_content:
if isinstance(mixed_item, Node):
inside_parts.append(mixed_item.FormatXml(indent + u' ', one_line))
if not one_line:
inside_parts.append(u'\n')
else:
message = mixed_item
# If this is the first item and it starts with whitespace, we add
# the ''' delimiter.
if not last_item and message.lstrip() != message:
message = u"'''" + message
inside_parts.append(util.EncodeCdata(message))
last_item = mixed_item
# If there are only child nodes and no cdata, there will be a spurious
# trailing \n
if len(inside_parts) and inside_parts[-1] == '\n':
inside_parts = inside_parts[:-1]
# If the last item is a string (not a node) and ends with whitespace,
# we need to add the ''' delimiter.
if (isinstance(last_item, types.StringTypes) and
last_item.rstrip() != last_item):
inside_parts[-1] = inside_parts[-1] + u"'''"
return u''.join(inside_parts)
def SubstituteMessages(self, substituter):
'''Applies substitutions to all messages in the tree.
Called as a final step of RunGatherers.
Args:
substituter: a grit.util.Substituter object.
'''
for child in self.children:
child.SubstituteMessages(substituter)
def _IsValidChild(self, child):
'''Returns true if 'child' is a valid child of this node.
Overridden by subclasses.'''
return False
def _IsValidAttribute(self, name, value):
'''Returns true if 'name' is the name of a valid attribute of this element
    and 'value' is a valid value for that attribute. Overridden by
subclasses unless they have only mandatory attributes.'''
return (name in self.MandatoryAttributes() or
name in self.DefaultAttributes())
def _ContentType(self):
'''Returns the type of content this element can have. Overridden by
subclasses. The content type can be one of the _CONTENT_TYPE_XXX constants
above.'''
return self._CONTENT_TYPE_NONE
def MandatoryAttributes(self):
'''Returns a list of attribute names that are mandatory (non-optional)
on the current element. One can specify a list of
"mutually exclusive mandatory" attributes by specifying them as one
element in the list, separated by a "|" character.
'''
return []
def DefaultAttributes(self):
'''Returns a dictionary of attribute names that have defaults, mapped to
the default value. Overridden by subclasses.'''
return {}
def GetCliques(self):
'''Returns all MessageClique objects belonging to this node. Overridden
by subclasses.
Return:
[clique1, clique2] or []
'''
return []
def ToRealPath(self, path_from_basedir):
'''Returns a real path (which can be absolute or relative to the current
working directory), given a path that is relative to the base directory
set for the GRIT input file.
Args:
path_from_basedir: '..'
Return:
'resource'
'''
return util.normpath(os.path.join(self.GetRoot().GetBaseDir(),
os.path.expandvars(path_from_basedir)))
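  # Illustrative example (editor's addition, hypothetical paths): with a GRD
  # base directory of 'grit/testdata' and a path_from_basedir of '../foo.grd',
  # ToRealPath() yields the normalized 'grit/foo.grd' after environment
  # variables have been expanded.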
def GetInputPath(self):
'''Returns a path, relative to the base directory set for the grd file,
that points to the file the node refers to.
'''
# This implementation works for most nodes that have an input file.
return self.attrs['file']
def UberClique(self):
'''Returns the uberclique that should be used for messages originating in
a given node. If the node itself has its uberclique set, that is what we
use, otherwise we search upwards until we find one. If we do not find one
even at the root node, we set the root node's uberclique to a new
uberclique instance.
'''
node = self
while not node.uberclique and node.parent:
node = node.parent
if not node.uberclique:
node.uberclique = clique.UberClique()
return node.uberclique
def IsTranslateable(self):
'''Returns false if the node has contents that should not be translated,
    otherwise returns true (even if the node has no contents).
'''
if not 'translateable' in self.attrs:
return True
else:
return self.attrs['translateable'] == 'true'
def GetNodeById(self, id):
'''Returns the node in the subtree parented by this node that has a 'name'
attribute matching 'id'. Returns None if no such node is found.
'''
for node in self:
if 'name' in node.attrs and node.attrs['name'] == id:
return node
return None
def GetChildrenOfType(self, type):
'''Returns a list of all subnodes (recursing to all leaves) of this node
that are of the indicated type (or tuple of types).
Args:
type: A type you could use with isinstance().
Return:
A list, possibly empty.
'''
return [child for child in self if isinstance(child, type)]
def GetTextualIds(self):
'''Returns a list of the textual ids of this node.
'''
if 'name' in self.attrs:
return [self.attrs['name']]
return []
@classmethod
def EvaluateExpression(cls, expr, defs, target_platform, extra_variables={}):
'''Worker for EvaluateCondition (below) and conditions in XTB files.'''
if expr in cls.eval_expr_cache:
code, variables_in_expr = cls.eval_expr_cache[expr]
else:
# Get a list of all variable and method names used in the expression.
syntax_tree = ast.parse(expr, mode='eval')
variables_in_expr = [node.id for node in ast.walk(syntax_tree) if
isinstance(node, ast.Name) and node.id not in ('True', 'False')]
code = compile(syntax_tree, filename='<string>', mode='eval')
cls.eval_expr_cache[expr] = code, variables_in_expr
# Set values only for variables that are needed to eval the expression.
variable_map = {}
for name in variables_in_expr:
if name == 'os':
value = target_platform
elif name == 'defs':
value = defs
elif name == 'is_linux':
value = target_platform.startswith('linux')
elif name == 'is_macosx':
value = target_platform == 'darwin'
elif name == 'is_win':
value = target_platform in ('cygwin', 'win32')
elif name == 'is_android':
value = target_platform == 'android'
elif name == 'is_ios':
value = target_platform == 'ios'
elif name == 'is_bsd':
value = 'bsd' in target_platform
elif name == 'is_posix':
value = (target_platform in ('darwin', 'linux2', 'linux3', 'sunos5',
'android', 'ios')
or 'bsd' in target_platform)
elif name == 'pp_ifdef':
def pp_ifdef(symbol):
return symbol in defs
value = pp_ifdef
elif name == 'pp_if':
def pp_if(symbol):
return defs.get(symbol, False)
value = pp_if
elif name in defs:
value = defs[name]
elif name in extra_variables:
value = extra_variables[name]
else:
# Undefined variables default to False.
value = False
variable_map[name] = value
eval_result = eval(code, {}, variable_map)
assert isinstance(eval_result, bool)
return eval_result
def EvaluateCondition(self, expr):
'''Returns true if and only if the Python expression 'expr' evaluates
to true.
The expression is given a few local variables:
- 'lang' is the language currently being output
(the 'lang' attribute of the <output> element).
- 'context' is the current output context
(the 'context' attribute of the <output> element).
- 'defs' is a map of C preprocessor-style symbol names to their values.
- 'os' is the current platform (likely 'linux2', 'win32' or 'darwin').
- 'pp_ifdef(symbol)' is a shorthand for "symbol in defs".
- 'pp_if(symbol)' is a shorthand for "symbol in defs and defs[symbol]".
      - 'is_linux', 'is_macosx', 'is_win', 'is_android', 'is_ios', 'is_bsd'
        and 'is_posix' are true if 'os' matches the given platform.
'''
root = self.GetRoot()
lang = getattr(root, 'output_language', '')
context = getattr(root, 'output_context', '')
defs = getattr(root, 'defines', {})
target_platform = getattr(root, 'target_platform', '')
extra_variables = {
'lang': lang,
'context': context,
}
return Node.EvaluateExpression(
expr, defs, target_platform, extra_variables)
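  # Editor's illustrative note (not part of the upstream grit source): an
  # <if> condition such as expr="is_win or pp_ifdef('use_foo')" (the define
  # name 'use_foo' is a made-up placeholder) goes through EvaluateCondition();
  # 'is_win' is derived from the root node's target_platform,
  # pp_ifdef('use_foo') is True when 'use_foo' appears in the defines map,
  # and any name that is not defined evaluates to False.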
def OnlyTheseTranslations(self, languages):
'''Turns off loading of translations for languages not in the provided list.
Attrs:
languages: ['fr', 'zh_cn']
'''
for node in self:
if (hasattr(node, 'IsTranslation') and
node.IsTranslation() and
node.GetLang() not in languages):
node.DisableLoading()
def FindBooleanAttribute(self, attr, default, skip_self):
'''Searches all ancestors of the current node for the nearest enclosing
definition of the given boolean attribute.
Args:
attr: 'fallback_to_english'
default: What to return if no node defines the attribute.
skip_self: Don't check the current node, only its parents.
'''
p = self.parent if skip_self else self
while p:
value = p.attrs.get(attr, 'default').lower()
if value != 'default':
return (value == 'true')
p = p.parent
return default
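  # Editor's illustrative note (not part of the upstream grit source):
  # PseudoIsAllowed() below is simply
  #   self.FindBooleanAttribute('allow_pseudo', default=True, skip_self=True)
  # so a <release allow_pseudo="false"> ancestor disables pseudo-translations
  # for everything underneath it, and nodes without such an ancestor get the
  # default of True.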
def PseudoIsAllowed(self):
'''Returns true if this node is allowed to use pseudo-translations. This
is true by default, unless this node is within a <release> node that has
the allow_pseudo attribute set to false.
'''
return self.FindBooleanAttribute('allow_pseudo',
default=True, skip_self=True)
def ShouldFallbackToEnglish(self):
'''Returns true iff this node should fall back to English when
pseudotranslations are disabled and no translation is available for a
given message.
'''
return self.FindBooleanAttribute('fallback_to_english',
default=False, skip_self=True)
def WhitelistMarkedAsSkip(self):
'''Returns true if the node is marked to be skipped in the output by a
whitelist.
'''
return self._whitelist_marked_as_skip
def SetWhitelistMarkedAsSkip(self, mark_skipped):
'''Sets WhitelistMarkedAsSkip.
'''
self._whitelist_marked_as_skip = mark_skipped
def ExpandVariables(self):
'''Whether we need to expand variables on a given node.'''
return False
def IsResourceMapSource(self):
'''Whether this node is a resource map source.'''
return False
def GeneratesResourceMapEntry(self, output_all_resource_defines,
is_active_descendant):
'''Whether this node should output a resource map entry.
Args:
output_all_resource_defines: The value of output_all_resource_defines for
the root node.
is_active_descendant: Whether the current node is an active descendant
from the root node.'''
return False
class ContentNode(Node):
'''Convenience baseclass for nodes that can have content.'''
def _ContentType(self):
return self._CONTENT_TYPE_MIXED
| hujiajie/chromium-crosswalk | tools/grit/grit/node/base.py | Python | bsd-3-clause | 22,026 | [
"xTB"
] | 7d551037f96b7515e474602a9d119859aa8c88a3ad426e91acdd5c9e75e36216 |