import re
import slugify as ascii_slugify
slugify = ascii_slugify.slugify
def u_slugify(txt):
    """A custom version of slugify that retains non-ascii characters. The purpose of this
    function in the application is to make URLs more readable in a browser, so there are
    some added heuristics to retain as much of the title meaning as possible while
    excluding characters that are troublesome to read in URLs. For example, question marks
    will be seen in the browser URL as %3F and are therefore unreadable. Although non-ascii
    characters will also be hex-encoded in the raw URL, most browsers will display them
    as human-readable glyphs in the address bar -- those should be kept in the slug."""
    txt = txt.strip()  # remove leading and trailing whitespace
    txt = re.sub(r'\s*-\s*', '-', txt, flags=re.UNICODE)  # remove spaces before and after dashes
    txt = re.sub(r'[\s/]', '-', txt, flags=re.UNICODE)  # replace remaining spaces and slashes with dashes
    txt = re.sub(r'(\d):(\d)', r'\1-\2', txt, flags=re.UNICODE)  # replace colons between digits with dashes
    txt = re.sub(r'"', '', txt, flags=re.UNICODE)  # remove double quotes
    txt = re.sub(r'''['.|?,:!@#~`+=$%^&\\*()\[\]{}<>]''', '', txt, flags=re.UNICODE)  # remove some characters altogether
    txt = re.sub(r'-+', '-', txt, flags=re.UNICODE)  # collapse runs of dashes
    return txt
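
# A minimal usage sketch (illustrative, not from the original source); the result
# shown follows from tracing the substitutions above:
#
#   u_slugify(u"Déjà vu: what's new in 2024?")   # -> u"Déjà-vu-whats-new-in-2024"
#
# Spaces become dashes, quotes and troublesome punctuation are dropped, and the
# non-ascii characters are kept so the slug stays readable in the address bar.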
|
# -*- coding: utf-8 -*-
import gtk
import gobject
from editablelabel import EditableLabel
from menu import GshellTabPopupMenu
from config import Config
class GshellTabLabel(gtk.HBox):
__gsignals__ = {
'close-clicked': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,(gobject.TYPE_OBJECT,)),
}
def __init__(self, title, notebook):
gtk.HBox.__init__(self)
self.notebook = notebook
self.config = Config()
self.terminal = None
self.label = EditableLabel(title)
self.update_angle()
self.broadcast_image = None
self.prefix_box = gtk.HBox()
self.pack_start(self.prefix_box, False, False)
self.pack_start(self.label, True, True)
self.update_button()
self.connect('button-press-event', self.show_popupmenu)
self.show_all()
def show_popupmenu(self, widget, event):
if event.type == gtk.gdk.BUTTON_PRESS and event.button == 3 and self.terminal:
popupmenu = GshellTabPopupMenu(tablabel=self, notebook=self.notebook)
popupmenu.popup(None, None, None, event.button, event.time)
return False
def on_enable_broadcast(self, widget, *args):
if self.terminal:
self.terminal.emit('enable-broadcast', self)
def enable_log(self, widget, *args):
if self.terminal and self.terminal.logger:
if self.terminal.logger.logging:
self.terminal.logger.stop_logger()
else:
if self.terminal.host and self.terminal.host['log']:
self.terminal.logger.start_logger(self.terminal.host['log'])
else:
self.terminal.logger.start_logger()
def update_button(self):
self.button = gtk.Button()
self.icon = gtk.Image()
self.icon.set_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
self.button.set_focus_on_click(False)
self.button.set_relief(gtk.RELIEF_NONE)
style = gtk.RcStyle()
style.xthickness = 0
style.ythickness = 0
self.button.modify_style(style)
self.button.add(self.icon)
self.button.connect('clicked', self.on_close)
self.button.set_name('tab-close-button')
if hasattr(self.button, 'set_tooltip_text'):
self.button.set_tooltip_text('Close Tab')
self.pack_start(self.button, False, False)
self.show_all()
def update_angle(self):
"""Update the angle of a label"""
position = self.notebook.get_tab_pos()
if position == gtk.POS_LEFT:
if hasattr(self, 'set_orientation'):
self.set_orientation(gtk.ORIENTATION_VERTICAL)
self.label.set_angle(90)
elif position == gtk.POS_RIGHT:
if hasattr(self, 'set_orientation'):
self.set_orientation(gtk.ORIENTATION_VERTICAL)
self.label.set_angle(270)
else:
if hasattr(self, 'set_orientation'):
self.set_orientation(gtk.ORIENTATION_HORIZONTAL)
self.label.set_angle(0)
def on_close(self, widget, data=None):
print 'GshellTabLabel::on_close called'
self.emit('close-clicked', self)
def mark_close(self):
text = self.label._label.get_text()
self.label._label.set_markup("<span color='darkgray' strikethrough='true'>%s</span>" % text)
def unmark_close(self):
text = self.label._label.get_text()
self.label.set_text(text)
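
# A minimal wiring sketch (added illustration; the window and page objects here
# are assumptions, not part of the original module): host the label in a
# gtk.Notebook tab and remove the page when 'close-clicked' is emitted.
if __name__ == '__main__':
    window = gtk.Window()
    notebook = gtk.Notebook()
    child = gtk.ScrolledWindow()                  # placeholder page content
    label = GshellTabLabel('host-01', notebook)

    def on_close_clicked(tab_label, widget):
        page = notebook.page_num(child)
        if page != -1:
            notebook.remove_page(page)

    label.connect('close-clicked', on_close_clicked)
    notebook.append_page(child)
    notebook.set_tab_label(child, label)
    window.add(notebook)
    window.connect('destroy', gtk.main_quit)
    window.show_all()
    gtk.main()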
|
from base import Geometry
import os
import json
import time
import arcpy
import calendar
import datetime
from base import BaseFilter
########################################################################
class LayerDefinitionFilter(BaseFilter):
"""
Allows you to filter the features of individual layers in the
query by specifying definition expressions for those layers. A
definition expression for a layer that is published with the
service will always be honored.
"""
_ids = []
_filterTemplate = {"layerId" : "", "where" : "", "outFields" : "*"}
_filter = []
#----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        # use instance attributes so filters are not shared between instances
        self._ids = []
        self._filter = []
#----------------------------------------------------------------------
def addFilter(self, layer_id, where=None, outFields="*"):
""" adds a layer definition filter """
import copy
f = copy.deepcopy(self._filterTemplate)
f['layerId'] = layer_id
f['outFields'] = outFields
if where is not None:
f['where'] = where
if f not in self._filter:
self._filter.append(f)
#----------------------------------------------------------------------
def removeFilter(self, filter_index):
""" removes a layer filter based on position in filter list """
f = self._filter[filter_index]
self._filter.remove(f)
#----------------------------------------------------------------------
def removeAll(self):
""" removes all items from the filter """
self._filter = []
#----------------------------------------------------------------------
@property
def filter(self):
""" returns the filter object as a list of layer defs """
return self._filter
########################################################################
class GeometryFilter(BaseFilter):
""" creates a geometry filter for queries
Inputs:
geomObject - a common.Geometry object
spatialFilter - The spatial relationship to be applied on the
input geometry while performing the query. The
supported spatial relationships include
intersects, contains, envelope intersects,
within, etc. The default spatial relationship
is intersects (esriSpatialRelIntersects).
Raises:
AttributeError for invalid inputs
"""
_allowedFilters = ["esriSpatialRelIntersects",
"esriSpatialRelContains",
"esriSpatialRelCrosses",
"esriSpatialRelEnvelopeIntersects",
"esriSpatialRelIndexIntersects",
"esriSpatialRelOverlaps",
"esriSpatialRelTouches",
"esriSpatialRelWithin"]
_geomObject = None
_spatialAction = None
_geomType = None
_spatialReference = None
#----------------------------------------------------------------------
def __init__(self, geomObject, spatialFilter="esriSpatialRelIntersects"):
"""Constructor"""
if isinstance(geomObject, Geometry) and \
spatialFilter in self._allowedFilters:
self._geomObject = geomObject
self._spatialAction = spatialFilter
self._geomType = geomObject.type
self._spatialReference = geomObject.spatialReference
else:
raise AttributeError("geomObject must be a geometry object and "+ \
"spatialFilter must be of value: " + \
"%s" % ", ".join(self._allowedFilters))
#----------------------------------------------------------------------
@property
def spatialRelation(self):
""" gets the filter type """
return self._spatialAction
#----------------------------------------------------------------------
@spatialRelation.setter
def spatialRelation(self, value):
if value.lower() in \
[x.lower() for x in self._allowedFilters]:
self._spatialAction = value
else:
raise AttributeError("spatialRelation must be values of " + \
"%s" % ", ".join(self._allowedFilters))
#----------------------------------------------------------------------
@property
def geometryType(self):
""" returns the geometry type """
return self._geomObject.type
#----------------------------------------------------------------------
@property
def geometry(self):
""" gets the geometry object used by the filter """
return self._geomObject
#----------------------------------------------------------------------
@geometry.setter
def geometry(self, geometry):
""" sets the geometry value """
if isinstance(geometry, Geometry):
self._geomObject = geometry
else:
raise AttributeError("geometry must be a common.Geometry object")
#----------------------------------------------------------------------
@property
def filter(self):
""" returns the key/value pair of a geometry filter """
return {"geometryType":self.geometryType,
"geometry": self._geomObject.asDictionary,
"spatialRel": self.spatialRelation,
"inSR" : self._geomObject.spatialReference}
#----------------------------------------------------------------------
########################################################################
class TimeFilter(BaseFilter):
""" Implements the time filter """
_startTime = None
_endTime = None
#----------------------------------------------------------------------
def __init__(self, start_time, time_zone="UTC", end_time=None):
"""Constructor"""
self._startTime = start_time
self._endTime = end_time
self._tz = time_zone
#----------------------------------------------------------------------
@property
def filter(self):
        if self._endTime is not None:
val = "%s, %s" % (self._startTime, self._endTime)
return val
else:
return "%s" % self._startTime
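
# A small usage sketch (added illustration, not part of the original module):
# build a layer definition filter and a time filter and inspect the values they
# would contribute to a query. The layer ids, where clause and times are hypothetical.
if __name__ == '__main__':
    ldf = LayerDefinitionFilter()
    ldf.addFilter(0, where="POP2000 > 100000")
    ldf.addFilter(1, outFields="NAME,STATE")
    print(ldf.filter)
    # -> [{'layerId': 0, 'where': 'POP2000 > 100000', 'outFields': '*'},
    #     {'layerId': 1, 'where': '', 'outFields': 'NAME,STATE'}]

    tf = TimeFilter(start_time=1199145600000, end_time=1230768000000)  # epoch milliseconds, assumed
    print(tf.filter)
    # -> '1199145600000, 1230768000000'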
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
OtbAlgorithmProvider.py
-----------------------
Date : 2018-01-30
Copyright : (C) 2018 by CNES
Email : rashad dot kanavath at c-s fr
****************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Rashad Kanavath'
__date__ = '2018-01-30'
__copyright__ = '(C) 2018 by CNES'
import os
import re
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (Qgis, QgsApplication, QgsProcessingProvider, QgsMessageLog)
from qgis import utils
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from processing.algs.otb.OtbUtils import OtbUtils
from processing.algs.otb.OtbAlgorithm import OtbAlgorithm
class OtbAlgorithmProvider(QgsProcessingProvider):
def __init__(self):
super().__init__()
self.algs = []
#!hack for 6.6!#
self.version = '6.6.0'
def load(self):
group = self.name()
ProcessingConfig.settingIcons[group] = self.icon()
ProcessingConfig.addSetting(Setting(group, OtbUtils.ACTIVATE, self.tr('Activate'), True))
ProcessingConfig.addSetting(Setting(group, OtbUtils.FOLDER,
self.tr("OTB folder"),
OtbUtils.otbFolder(),
valuetype=Setting.FOLDER,
validator=self.validateOtbFolder
))
ProcessingConfig.addSetting(Setting(group, OtbUtils.APP_FOLDER,
self.tr("OTB application folder"),
OtbUtils.appFolder(),
valuetype=Setting.MULTIPLE_FOLDERS,
validator=self.validateAppFolders
))
ProcessingConfig.addSetting(Setting(group, OtbUtils.SRTM_FOLDER,
self.tr("SRTM tiles folder"),
OtbUtils.srtmFolder(),
valuetype=Setting.FOLDER
))
ProcessingConfig.addSetting(Setting(group, OtbUtils.GEOID_FILE,
self.tr("Geoid file"),
OtbUtils.geoidFile(),
valuetype=Setting.FOLDER
))
ProcessingConfig.addSetting(Setting(group, OtbUtils.MAX_RAM_HINT,
self.tr("Maximum RAM to use"),
OtbUtils.maxRAMHint(),
valuetype=Setting.STRING
))
ProcessingConfig.addSetting(Setting(group, OtbUtils.LOGGER_LEVEL,
self.tr("Logger level"),
OtbUtils.loggerLevel(),
valuetype=Setting.STRING,
validator=self.validateLoggerLevel
))
ProcessingConfig.readSettings()
self.refreshAlgorithms()
return True
def unload(self):
for setting in OtbUtils.settingNames():
ProcessingConfig.removeSetting(setting)
def isActive(self):
return ProcessingConfig.getSetting(OtbUtils.ACTIVATE)
def setActive(self, active):
ProcessingConfig.setSettingValue(OtbUtils.ACTIVATE, active)
def createAlgsList(self):
algs = []
try:
folder = OtbUtils.otbFolder()
alg_names = []
algs_txt = self.algsFile(folder)
with open(algs_txt) as lines:
line = lines.readline().strip('\n').strip()
if line != '' and line.startswith('#'):
line = lines.readline().strip('\n').strip()
while line != '' and not line.startswith('#'):
data = line.split('|')
descriptionFile = self.descrFile(folder, str(data[1]) + '.txt')
group, name = str(data[0]), str(data[1])
if name not in alg_names:
algs.append(OtbAlgorithm(group, name, descriptionFile))
#avoid duplicate algorithms from algs.txt file (possible but rare)
alg_names.append(name)
line = lines.readline().strip('\n').strip()
except Exception as e:
import traceback
errmsg = "Could not open OTB algorithm from file: \n" + descriptionFile + "\nError:\n" + traceback.format_exc()
QgsMessageLog.logMessage(self.tr(errmsg), self.tr('Processing'), Qgis.Critical)
return algs
def loadAlgorithms(self):
if not self.canBeActivated():
return
version_file = os.path.join(OtbUtils.otbFolder(), 'share', 'doc', 'otb', 'VERSION')
if not os.path.isfile(version_file):
version_file = os.path.join(OtbUtils.otbFolder(), 'VERSION')
if os.path.isfile(version_file):
with open(version_file) as vf:
vlines = vf.readlines()
vlines = [l.strip() for l in vlines]
vline = vlines[0]
if 'OTB Version:' in vline:
self.version = vline.split(':')[1].strip()
QgsMessageLog.logMessage(self.tr("Loading OTB '{}'.".format(self.version)), self.tr('Processing'), Qgis.Info)
self.algs = self.createAlgsList()
for a in self.algs:
self.addAlgorithm(a)
self.algs = []
def canBeActivated(self):
if not self.isActive():
return False
folder = OtbUtils.otbFolder()
if folder and os.path.exists(folder):
if os.path.isfile(self.algsFile(folder)):
return True
utils.iface.messageBar().pushWarning("OTB", "Cannot find '{}'. OTB provider will be disabled".format(self.algsFile(folder)))
self.setActive(False)
return False
def validateLoggerLevel(self, v):
allowed_values = ['DEBUG', 'INFO', 'WARNING', 'CRITICAL', 'FATAL']
if v in allowed_values:
return True
else:
raise ValueError(self.tr("'{}' is not valid. Possible values are '{}'".format(v, ', '.join(allowed_values))))
def validateAppFolders(self, v):
if not self.isActive():
return
if not v:
self.setActive(False)
raise ValueError(self.tr('Cannot activate OTB provider'))
folder = OtbUtils.otbFolder()
otb_app_dirs = self.appDirs(v)
if len(otb_app_dirs) < 1:
self.setActive(False)
raise ValueError(self.tr("'{}' does not exist. OTB provider will be disabled".format(v)))
        # isValid is True if there is at least one valid OTB application in the given path
isValid = False
descr_folder = self.descrFolder(folder)
for app_dir in otb_app_dirs:
if not os.path.exists(app_dir):
continue
for otb_app in os.listdir(app_dir):
if not otb_app.startswith('otbapp_') or \
'TestApplication' in otb_app or \
'ApplicationExample' in otb_app:
continue
app_name = os.path.basename(otb_app).split('.')[0][7:]
dfile = os.path.join(descr_folder, app_name + '.txt')
isValid = True
if not os.path.exists(dfile):
cmdlist = [OtbUtils.getExecutableInPath(folder, 'otbQgisDescriptor'),
app_name, app_dir, descr_folder + '/']
commands = ' '.join(cmdlist)
QgsMessageLog.logMessage(self.tr(commands), self.tr('Processing'), Qgis.Critical)
OtbUtils.executeOtb(commands, feedback=None)
if isValid:
            # the None check is needed when running tests (utils.iface is not set)
if utils.iface is not None:
utils.iface.messageBar().pushInfo("OTB", "OTB provider is activated from '{}'.".format(folder))
else:
self.setActive(False)
raise ValueError(self.tr("No OTB algorithms found in '{}'. OTB will be disabled".format(','.join(otb_app_dirs))))
def normalize_path(self, p):
# https://stackoverflow.com/a/20713238/1003090
return os.path.normpath(os.sep.join(re.split(r'\\|/', p)))
def validateOtbFolder(self, v):
if not self.isActive():
return
if not v or not os.path.exists(v):
self.setActive(False)
raise ValueError(self.tr("'{}' does not exist. OTB provider will be disabled".format(v)))
path = self.normalize_path(v)
app_launcher_path = OtbUtils.getExecutableInPath(path, 'otbApplicationLauncherCommandLine')
if not os.path.exists(app_launcher_path):
self.setActive(False)
raise ValueError(self.tr("Cannot find '{}'. OTB will be disabled".format(app_launcher_path)))
def algsFile(self, d):
return os.path.join(self.descrFolder(d), 'algs.txt')
def descrFolder(self, d):
#!hack for 6.6!#
if os.path.exists(os.path.join(d, 'description')):
return os.path.join(d, 'description')
else:
return os.path.join(d, 'share', 'otb', 'description')
def descrFile(self, d, f):
return os.path.join(self.descrFolder(d), f)
def appDirs(self, v):
app_dirs = []
for f in v.split(';'):
if f is not None and os.path.exists(f):
app_dirs.append(self.normalize_path(f))
return app_dirs
def name(self):
return 'OTB'
def longName(self):
return 'OTB ({})'.format(self.version) if self.version is not None else 'OTB'
def id(self):
return 'otb'
def supportsNonFileBasedOutput(self):
"""
OTB Provider doesn't support non file based outputs
"""
return False
def icon(self):
return QgsApplication.getThemeIcon("/providerOtb.svg")
def tr(self, string, context=''):
if context == '':
context = 'OtbAlgorithmProvider'
return QCoreApplication.translate(context, string)
def defaultVectorFileExtension(self, hasGeometry=True):
return 'shp'
def defaultRasterFileExtension(self):
return 'tif'
def supportedOutputTableExtensions(self):
return ['dbf']
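
# For reference (inferred from createAlgsList above, not from OTB documentation):
# the algs.txt file under descrFolder() is expected to contain one "group|name"
# pair per line, with an optional leading '#' header line; a blank line or a new
# '#' line ends the list. The entries below are illustrative only:
#
#   #Group|Name
#   Image Manipulation|DynamicConvert
#   Image Manipulation|ExtractROI
#   Calibration|OpticalCalibration
#
# Each listed name must have a matching "<name>.txt" description file in the
# same description folder.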
|
# -*- coding: utf-8 -*-
"""
Map
---
All the part of crossfolium that is about drawing things in a folium.Map.
"""
from jinja2 import Template
from folium.map import FeatureGroup
from folium.plugins import HeatMap
from branca.element import Figure, JavascriptLink, CssLink
class FeatureGroupFilter(FeatureGroup):
def __init__(self, crossfilter, name=None, fit_bounds=False,
circle_radius=None, color="#0000ff", opacity=1., **kwargs):
"""
"""
super(FeatureGroupFilter, self).__init__(**kwargs)
self._name = 'FeatureGroupFilter'
self.tile_name = name if name is not None else self.get_name()
self.crossfilter = crossfilter
self.fit_bounds = fit_bounds
self.circle_radius = circle_radius
self.color = color
self.opacity = opacity
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.feature_group = new L.FeatureGroup();
{{this.get_name()}}.marker_function = function(d) {return L.marker([0,0]);}
{{this.get_name()}}.updateFun = function() {
this.feature_group.clearLayers();
var dimVals = {{this.crossfilter.get_name()}}.allDim.top(Infinity)
for (var i in dimVals) {
var d = dimVals[i];
var marker = this.marker_function(d);
this.feature_group.addLayer(marker);
}
{{this._parent.get_name()}}.addLayer(this.feature_group);
{% if this.fit_bounds %}{{this._parent.get_name()}}
.fitBounds(this.feature_group.getBounds());{% endif %}
}
dc.dataTable('#foo')
.dimension({{this.crossfilter.get_name()}}.allDim)
.group(function (d) { return 'dc.js';})
.on('renderlet', function (table) { {{this.get_name()}}.updateFun();});
{{this.get_name()}}.updateFun();
{% endmacro %}
""")
class HeatmapFilter(HeatMap):
def __init__(self, crossfilter, name=None, fit_bounds=False, **kwargs):
"""
"""
super(HeatmapFilter, self).__init__([], **kwargs)
self._name = 'HeatmapFilter'
self.crossfilter = crossfilter
self.fit_bounds = fit_bounds
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.heatmap = new L.heatLayer(
{},
{
minOpacity: {{this.min_opacity}},
maxZoom: {{this.max_zoom}},
max: {{this.max_val}},
radius: {{this.radius}},
blur: {{this.blur}},
gradient: {{this.gradient}}
})
.addTo({{this._parent.get_name()}});
{{this.get_name()}}.updateFun = function() {
// this.heatmap.clearLayers();
var dimVals = {{this.crossfilter.get_name()}}.allDim.top(Infinity);
var latlngs = [];
for (var i in dimVals) {
var d = dimVals[i];
latlngs.push([d.lat, d.lng]);
}
{{this.get_name()}}.heatmap.setLatLngs(latlngs);
{% if this.fit_bounds %}{{this._parent.get_name()}}
.fitBounds(this.heatmap.getBounds());{% endif %}
}
dc.dataTable('#foo')
.dimension({{this.crossfilter.get_name()}}.allDim)
.group(function (d) { return 'dc.js';})
.on('renderlet', function (table) { {{this.get_name()}}.updateFun();});
{{this.get_name()}}.updateFun();
{% endmacro %}
""")
class MarkerClusterFilter(FeatureGroup):
def __init__(self, crossfilter, lat='lat', lng='lng', name=None, fit_bounds=False,
max_cluster_radius=None, geofilter=True,
circle_radius=None, color="#0000ff", opacity=1., **kwargs):
"""
"""
super(MarkerClusterFilter, self).__init__(**kwargs)
self._name = 'MarkerClusterFilter'
self.tile_name = name if name is not None else self.get_name()
self.crossfilter = crossfilter
self.lat = lat
self.lng = lng
self.fit_bounds = fit_bounds
self.circle_radius = circle_radius
self.color = color
self.opacity = opacity
self.max_cluster_radius = max_cluster_radius
self.geofilter = geofilter
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.marker_function = function(p) {return L.marker([p["{{this.lat}}"],p["{{this.lng}}"]]);}
{{this.get_name()}}.chart = new L.markerClusterGroup({
{%if this.max_cluster_radius %}maxClusterRadius:{{this.max_cluster_radius}},{%endif%}
});
{% if this.geofilter %}
{{this.get_name()}}.latDimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(p) { return p["{{this.lat}}"]; });
{{this.get_name()}}.lngDimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(p) { return p["{{this.lng}}"]; });
{{this._parent.get_name()}}.on('moveend', function(){
var bounds = {{this._parent.get_name()}}.getBounds();
{{this.get_name()}}.latDimension.filterRange([bounds._southWest.lat,bounds._northEast.lat]);
{{this.get_name()}}.lngDimension.filterRange([bounds._southWest.lng,bounds._northEast.lng]);
//dc.filterAll();
dc.renderAll();
});
{% endif %}
{{this.get_name()}}.updateFun = function() {
this.chart.clearLayers();
var dimVals = {{this.crossfilter.get_name()}}.allDim.top(Infinity)
for (var i in dimVals) {
var d = dimVals[i];
var marker = this.marker_function(d);
this.chart.addLayer(marker);
}
{{this._parent.get_name()}}.addLayer(this.chart);
{% if this.fit_bounds %}{{this._parent.get_name()}}
.fitBounds(this.chart.getBounds());{% endif %}
}
dc.dataTable('#{{this.get_name()}}footable')
.dimension({{this.crossfilter.get_name()}}.allDim)
.group(function (d) { return 'dc.js';})
.on('renderlet', function (table) { {{this.get_name()}}.updateFun();});
{{this.get_name()}}.updateFun();
{% endmacro %}
""") # noqa
def render(self, **kwargs):
super(MarkerClusterFilter, self).render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/leaflet.markercluster.js"), # noqa
name='markerclusterjs')
figure.header.add_children(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.css"), # noqa
name='markerclustercss')
figure.header.add_children(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.Default.css"), # noqa
name='markerclusterdefaultcss')
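
# A rough usage sketch (assumptions: a folium.Map `m` and a crossfilter element
# `cf` built elsewhere with crossfolium; all names here are illustrative only):
#
#   m = folium.Map(location=[45.5, -122.6], zoom_start=11)
#   MarkerClusterFilter(cf, lat='lat', lng='lng', fit_bounds=True).add_to(m)
#   HeatmapFilter(cf).add_to(m)
#   m.save('map_with_filters.html')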
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import os
from collections import defaultdict, Mapping
import third_party.json_schema_compiler.json_parse as json_parse
import third_party.json_schema_compiler.model as model
import third_party.json_schema_compiler.idl_schema as idl_schema
import third_party.json_schema_compiler.idl_parser as idl_parser
def _RemoveNoDocs(item):
if json_parse.IsDict(item):
if item.get('nodoc', False):
return True
for key, value in item.items():
if _RemoveNoDocs(value):
del item[key]
elif type(item) == list:
to_remove = []
for i in item:
if _RemoveNoDocs(i):
to_remove.append(i)
for i in to_remove:
item.remove(i)
return False
def _DetectInlineableTypes(schema):
"""Look for documents that are only referenced once and mark them as inline.
Actual inlining is done by _InlineDocs.
"""
if not schema.get('types'):
return
ignore = frozenset(('value', 'choices'))
refcounts = defaultdict(int)
# Use an explicit stack instead of recursion.
stack = [schema]
while stack:
node = stack.pop()
if isinstance(node, list):
stack.extend(node)
elif isinstance(node, Mapping):
if '$ref' in node:
refcounts[node['$ref']] += 1
stack.extend(v for k, v in node.iteritems() if k not in ignore)
for type_ in schema['types']:
if not 'noinline_doc' in type_:
if refcounts[type_['id']] == 1:
type_['inline_doc'] = True
def _InlineDocs(schema):
"""Replace '$ref's that refer to inline_docs with the json for those docs.
"""
types = schema.get('types')
if types is None:
return
inline_docs = {}
types_without_inline_doc = []
# Gather the types with inline_doc.
for type_ in types:
if type_.get('inline_doc'):
inline_docs[type_['id']] = type_
for k in ('description', 'id', 'inline_doc'):
type_.pop(k, None)
else:
types_without_inline_doc.append(type_)
schema['types'] = types_without_inline_doc
def apply_inline(node):
if isinstance(node, list):
for i in node:
apply_inline(i)
elif isinstance(node, Mapping):
ref = node.get('$ref')
if ref and ref in inline_docs:
node.update(inline_docs[ref])
del node['$ref']
for k, v in node.iteritems():
apply_inline(v)
apply_inline(schema)
def _CreateId(node, prefix):
if node.parent is not None and not isinstance(node.parent, model.Namespace):
return '-'.join([prefix, node.parent.simple_name, node.simple_name])
return '-'.join([prefix, node.simple_name])
def _FormatValue(value):
"""Inserts commas every three digits for integer values. It is magic.
"""
s = str(value)
return ','.join([s[max(0, i - 3):i] for i in range(len(s), 0, -3)][::-1])
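# For example (illustrative, not part of the original source):
#   _FormatValue(42)       ->  '42'
#   _FormatValue(1234567)  ->  '1,234,567'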
class _JSCModel(object):
"""Uses a Model from the JSON Schema Compiler and generates a dict that
a Handlebar template can use for a data source.
"""
def __init__(self, json, ref_resolver, disable_refs, idl=False):
self._ref_resolver = ref_resolver
self._disable_refs = disable_refs
clean_json = copy.deepcopy(json)
if _RemoveNoDocs(clean_json):
self._namespace = None
else:
if idl:
_DetectInlineableTypes(clean_json)
_InlineDocs(clean_json)
self._namespace = model.Namespace(clean_json, clean_json['namespace'])
def _FormatDescription(self, description):
if self._disable_refs:
return description
return self._ref_resolver.ResolveAllLinks(description,
namespace=self._namespace.name)
def _GetLink(self, link):
if self._disable_refs:
type_name = link.split('.', 1)[-1]
return { 'href': '#type-%s' % type_name, 'text': link, 'name': link }
return self._ref_resolver.SafeGetLink(link, namespace=self._namespace.name)
def ToDict(self):
if self._namespace is None:
return {}
return {
'name': self._namespace.name,
'description': self._namespace.description,
'types': self._GenerateTypes(self._namespace.types.values()),
'functions': self._GenerateFunctions(self._namespace.functions),
'events': self._GenerateEvents(self._namespace.events),
'properties': self._GenerateProperties(self._namespace.properties)
}
def _GenerateTypes(self, types):
return [self._GenerateType(t) for t in types]
def _GenerateType(self, type_):
type_dict = {
'name': type_.simple_name,
'description': self._FormatDescription(type_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'events': self._GenerateEvents(type_.events),
'id': _CreateId(type_, 'type')
}
self._RenderTypeInformation(type_, type_dict)
return type_dict
def _GenerateFunctions(self, functions):
return [self._GenerateFunction(f) for f in functions.values()]
def _GenerateFunction(self, function):
function_dict = {
'name': function.simple_name,
'description': self._FormatDescription(function.description),
'callback': self._GenerateCallback(function.callback),
'parameters': [],
'returns': None,
'id': _CreateId(function, 'method')
}
if (function.parent is not None and
not isinstance(function.parent, model.Namespace)):
function_dict['parent_name'] = function.parent.simple_name
if function.returns:
function_dict['returns'] = self._GenerateType(function.returns)
for param in function.params:
function_dict['parameters'].append(self._GenerateProperty(param))
if function.callback is not None:
# Show the callback as an extra parameter.
function_dict['parameters'].append(
self._GenerateCallbackProperty(function.callback))
if len(function_dict['parameters']) > 0:
function_dict['parameters'][-1]['last'] = True
return function_dict
def _GenerateEvents(self, events):
return [self._GenerateEvent(e) for e in events.values()]
def _GenerateEvent(self, event):
event_dict = {
'name': event.simple_name,
'description': self._FormatDescription(event.description),
'parameters': [self._GenerateProperty(p) for p in event.params],
'callback': self._GenerateCallback(event.callback),
'filters': [self._GenerateProperty(f) for f in event.filters],
'conditions': [self._GetLink(condition)
for condition in event.conditions],
'actions': [self._GetLink(action) for action in event.actions],
'supportsRules': event.supports_rules,
'id': _CreateId(event, 'event')
}
if (event.parent is not None and
not isinstance(event.parent, model.Namespace)):
event_dict['parent_name'] = event.parent.simple_name
if event.callback is not None:
# Show the callback as an extra parameter.
event_dict['parameters'].append(
self._GenerateCallbackProperty(event.callback))
if len(event_dict['parameters']) > 0:
event_dict['parameters'][-1]['last'] = True
return event_dict
def _GenerateCallback(self, callback):
if not callback:
return None
callback_dict = {
'name': callback.simple_name,
'simple_type': {'simple_type': 'function'},
'optional': callback.optional,
'parameters': []
}
for param in callback.params:
callback_dict['parameters'].append(self._GenerateProperty(param))
if (len(callback_dict['parameters']) > 0):
callback_dict['parameters'][-1]['last'] = True
return callback_dict
def _GenerateProperties(self, properties):
return [self._GenerateProperty(v) for v in properties.values()]
def _GenerateProperty(self, property_):
if not hasattr(property_, 'type_'):
for d in dir(property_):
if not d.startswith('_'):
print ('%s -> %s' % (d, getattr(property_, d)))
type_ = property_.type_
# Make sure we generate property info for arrays, too.
# TODO(kalman): what about choices?
if type_.property_type == model.PropertyType.ARRAY:
properties = type_.item_type.properties
else:
properties = type_.properties
property_dict = {
'name': property_.simple_name,
'optional': property_.optional,
'description': self._FormatDescription(property_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'parameters': [],
'returns': None,
'id': _CreateId(property_, 'property')
}
if type_.property_type == model.PropertyType.FUNCTION:
function = type_.function
for param in function.params:
property_dict['parameters'].append(self._GenerateProperty(param))
if function.returns:
property_dict['returns'] = self._GenerateType(function.returns)
if (property_.parent is not None and
not isinstance(property_.parent, model.Namespace)):
property_dict['parent_name'] = property_.parent.simple_name
value = property_.value
if value is not None:
if isinstance(value, int):
property_dict['value'] = _FormatValue(value)
else:
property_dict['value'] = value
else:
self._RenderTypeInformation(type_, property_dict)
return property_dict
def _GenerateCallbackProperty(self, callback):
property_dict = {
'name': callback.simple_name,
'description': self._FormatDescription(callback.description),
'optional': callback.optional,
'id': _CreateId(callback, 'property'),
'simple_type': 'function',
}
if (callback.parent is not None and
not isinstance(callback.parent, model.Namespace)):
property_dict['parent_name'] = callback.parent.simple_name
return property_dict
def _RenderTypeInformation(self, type_, dst_dict):
dst_dict['is_object'] = type_.property_type == model.PropertyType.OBJECT
if type_.property_type == model.PropertyType.CHOICES:
dst_dict['choices'] = self._GenerateTypes(type_.choices)
      # We keep track of which choice is last so templates know when to insert
      # "or" between choices.
if len(dst_dict['choices']) > 0:
dst_dict['choices'][-1]['last'] = True
elif type_.property_type == model.PropertyType.REF:
dst_dict['link'] = self._GetLink(type_.ref_type)
elif type_.property_type == model.PropertyType.ARRAY:
dst_dict['array'] = self._GenerateType(type_.item_type)
elif type_.property_type == model.PropertyType.ENUM:
dst_dict['enum_values'] = []
for enum_value in type_.enum_values:
dst_dict['enum_values'].append({'name': enum_value})
if len(dst_dict['enum_values']) > 0:
dst_dict['enum_values'][-1]['last'] = True
elif type_.instance_of is not None:
dst_dict['simple_type'] = type_.instance_of.lower()
else:
dst_dict['simple_type'] = type_.property_type.name.lower()
class _LazySamplesGetter(object):
"""This class is needed so that an extensions API page does not have to fetch
the apps samples page and vice versa.
"""
def __init__(self, api_name, samples):
self._api_name = api_name
self._samples = samples
def get(self, key):
return self._samples.FilterSamples(key, self._api_name)
class APIDataSource(object):
"""This class fetches and loads JSON APIs from the FileSystem passed in with
|compiled_fs_factory|, so the APIs can be plugged into templates.
"""
class Factory(object):
def __init__(self, compiled_fs_factory, base_path):
def create_compiled_fs(fn, category):
return compiled_fs_factory.Create(fn, APIDataSource, category=category)
self._permissions_cache = create_compiled_fs(self._LoadPermissions,
'permissions')
self._json_cache = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, False),
'json')
self._idl_cache = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, False),
'idl')
# These caches are used if an APIDataSource does not want to resolve the
# $refs in an API. This is needed to prevent infinite recursion in
# ReferenceResolver.
self._json_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, True),
'json-no-refs')
self._idl_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, True),
'idl-no-refs')
self._idl_names_cache = create_compiled_fs(self._GetIDLNames, 'idl-names')
self._names_cache = create_compiled_fs(self._GetAllNames, 'names')
self._base_path = base_path
# These must be set later via the SetFooDataSourceFactory methods.
self._ref_resolver_factory = None
self._samples_data_source_factory = None
def SetSamplesDataSourceFactory(self, samples_data_source_factory):
self._samples_data_source_factory = samples_data_source_factory
def SetReferenceResolverFactory(self, ref_resolver_factory):
self._ref_resolver_factory = ref_resolver_factory
    def Create(self, request, disable_refs=False):
      """Creates an APIDataSource. |disable_refs| specifies whether the |ToDict|
      method of _JSCModel follows $refs in the API being processed. Disabling
      refs prevents endless recursion in ReferenceResolver.
      """
if self._samples_data_source_factory is None:
# Only error if there is a request, which means this APIDataSource is
# actually being used to render a page.
if request is not None:
logging.error('SamplesDataSource.Factory was never set in '
'APIDataSource.Factory.')
samples = None
else:
samples = self._samples_data_source_factory.Create(request)
if not disable_refs and self._ref_resolver_factory is None:
logging.error('ReferenceResolver.Factory was never set in '
'APIDataSource.Factory.')
return APIDataSource(self._permissions_cache,
self._json_cache,
self._idl_cache,
self._json_cache_no_refs,
self._idl_cache_no_refs,
self._names_cache,
self._idl_names_cache,
self._base_path,
samples,
disable_refs)
def _LoadPermissions(self, file_name, json_str):
return json_parse.Parse(json_str)
def _LoadJsonAPI(self, api, disable_refs):
return _JSCModel(
json_parse.Parse(api)[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs).ToDict()
def _LoadIdlAPI(self, api, disable_refs):
idl = idl_parser.IDLParser().ParseData(api)
return _JSCModel(
idl_schema.IDLSchema(idl).process()[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs,
idl=True).ToDict()
def _GetIDLNames(self, base_dir, apis):
return self._GetExtNames(apis, ['idl'])
def _GetAllNames(self, base_dir, apis):
return self._GetExtNames(apis, ['json', 'idl'])
def _GetExtNames(self, apis, exts):
return [model.UnixName(os.path.splitext(api)[0]) for api in apis
if os.path.splitext(api)[1][1:] in exts]
def __init__(self,
permissions_cache,
json_cache,
idl_cache,
json_cache_no_refs,
idl_cache_no_refs,
names_cache,
idl_names_cache,
base_path,
samples,
disable_refs):
self._base_path = base_path
self._permissions_cache = permissions_cache
self._json_cache = json_cache
self._idl_cache = idl_cache
self._json_cache_no_refs = json_cache_no_refs
self._idl_cache_no_refs = idl_cache_no_refs
self._names_cache = names_cache
self._idl_names_cache = idl_names_cache
self._samples = samples
self._disable_refs = disable_refs
def _GetFeatureFile(self, filename):
perms = self._permissions_cache.GetFromFile('%s/%s' %
(self._base_path, filename))
return dict((model.UnixName(k), v) for k, v in perms.iteritems())
def _GetFeatureData(self, path):
    # Remove 'experimental_' from the path name to match the keys in
    # _permission_features.json.
path = model.UnixName(path.replace('experimental_', ''))
for filename in ['_permission_features.json', '_manifest_features.json']:
feature_data = self._GetFeatureFile(filename).get(path, None)
if feature_data is not None:
break
# There are specific cases in which the feature is actually a list of
# features where only one needs to match; but currently these are only
# used to whitelist features for specific extension IDs. Filter those out.
if isinstance(feature_data, list):
feature_list = feature_data
feature_data = None
for single_feature in feature_list:
if 'whitelist' in single_feature:
continue
if feature_data is not None:
# Note: if you are seeing the exception below, add more heuristics as
# required to form a single feature.
raise ValueError('Multiple potential features match %s. I can\'t '
'decide which one to use. Please help!' % path)
feature_data = single_feature
if feature_data and feature_data['channel'] in ('trunk', 'dev', 'beta'):
feature_data[feature_data['channel']] = True
return feature_data
def _GenerateHandlebarContext(self, handlebar_dict, path):
handlebar_dict['permissions'] = self._GetFeatureData(path)
handlebar_dict['samples'] = _LazySamplesGetter(path, self._samples)
return handlebar_dict
def _GetAsSubdirectory(self, name):
if name.startswith('experimental_'):
parts = name[len('experimental_'):].split('_', 1)
parts[1] = 'experimental_%s' % parts[1]
return '/'.join(parts)
return name.replace('_', '/', 1)
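  # For illustration (not in the original source), _GetAsSubdirectory maps names
  # such as:
  #   'app_window'                    -> 'app/window'
  #   'experimental_devtools_console' -> 'devtools/experimental_console'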
def get(self, key):
if key.endswith('.html') or key.endswith('.json') or key.endswith('.idl'):
path, ext = os.path.splitext(key)
else:
path = key
unix_name = model.UnixName(path)
idl_names = self._idl_names_cache.GetFromFileListing(self._base_path)
names = self._names_cache.GetFromFileListing(self._base_path)
if unix_name not in names and self._GetAsSubdirectory(unix_name) in names:
unix_name = self._GetAsSubdirectory(unix_name)
if self._disable_refs:
cache, ext = (
(self._idl_cache_no_refs, '.idl') if (unix_name in idl_names) else
(self._json_cache_no_refs, '.json'))
else:
cache, ext = ((self._idl_cache, '.idl') if (unix_name in idl_names) else
(self._json_cache, '.json'))
return self._GenerateHandlebarContext(
cache.GetFromFile('%s/%s%s' % (self._base_path, unix_name, ext)),
path)
|
#!/usr/bin/python
from header import *
from glyph import *
sys.path.append("pyglet-hg")
import pyglet
xpad = 15 # border, in pixels
ypad = 100
PixPerCell = 3
SecPerTick = .1
SpotRadius = 1
DelayGlow = 4
window = pyglet.window.Window(
width = 2*xpad + ( PixPerCell * XGridSize ),
height= 2*ypad + ( PixPerCell * YGridSize ) )
# Print all the events
#window.push_handlers(pyglet.window.event.WindowEventLogger())
# We only want to calculate once.
gridNums = []
def gridNumbers( x, y, num ):
gridNums.append( pyglet.text.Label(
str(num),
color=4*(155,),
font_name='Times New Roman',
font_size=9,
x=x, y=y,
anchor_x='center', anchor_y='center')
)
gridDrawNpts = 0
gridDrawPts = ()
xmin = xpad
ymin = ypad
xmax = window.width - xpad
ymax = window.height - ypad
def line(x0,y0,x1,y1):
global gridDrawNpts, gridDrawPts
gridDrawPts = gridDrawPts + ( x0, y0, x1, y1 )
gridDrawNpts = gridDrawNpts + 2
for x in range( xmin, xmax, 8*PixPerCell ):
line( x, ymin, x, ymax )
gridNumbers( x, ymin-6, (x-xmin)/PixPerCell )
line( xmax, ymin, xmax, ymax )
for y in range( ymin, ymax, 8*PixPerCell ):
line( xmin, y, xmax, y )
gridNumbers( xmin-6, y, (y-ymin)/PixPerCell )
line( xmin, ymax, xmax, ymax )
del line, xmin, ymin, xmax, ymax, gridNumbers
def gridDraw():
global gridDrawNpts, gridDrawPts, gridNums
pyglet.gl.glColor4f( .3, .3, .3, .3 )
pyglet.graphics.draw( gridDrawNpts, pyglet.gl.GL_LINES,
( 'v2i', gridDrawPts ) )
for n in gridNums:
n.draw()
# Take in a cell:
# (x,y) such that 0 <= x < XGridSize, 0 <= y < YGridSize
# return a pixel coordinate of the center of the cell.
def cell2pix( x, y ):
tx = xpad + PixPerCell // 2
ty = ypad + PixPerCell // 2
cx = x * PixPerCell + tx
cy = y * PixPerCell + ty
return ( cx, cy )
def pix2cell( px, py ):
px = px - xpad
if 0 > px: return None
py = py - ypad
if 0 > py: return None
x = int( float(px) / PixPerCell )
if x >= XGridSize: return None
y = int( float(py) / PixPerCell )
if y >= YGridSize: return None
return ( x, y )
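# Illustrative round trip using the constants above (xpad=15, ypad=100,
# PixPerCell=3); values added here, not from the original source:
#   cell2pix(0, 0)    -> (16, 101)   # centre of the bottom-left cell
#   pix2cell(16, 101) -> (0, 0)
#   pix2cell(5, 5)    -> None        # falls inside the border padding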
def spotDraw( x, y, i ):
x, y = cell2pix( x, y )
# Draw middle
pyglet.gl.glColor4f( i, i, i, i )
pyglet.graphics.draw( 1, pyglet.gl.GL_POINTS,
('v2i', ( x, y ) ) )
    # SpotRadius * PixPerCell is the pixel radius
npr = int( SpotRadius * PixPerCell )
fracDec = float(i)/npr
for j in range(1, npr+1,1):
pts = []
i = i - fracDec
for xt,yt in ((x+j, y),(x-j,y),(x+j,y+j),(x-j,y-j),(x-j,y+j),(x+j,y-j),(x,y+j),(x,y-j)):
pts.append( xt )
pts.append( yt )
pyglet.gl.glColor4f( i, i, i, i )
pyglet.graphics.draw( 8, pyglet.gl.GL_POINTS,
('v2i', pts ) )
spotsGrid = []
for x in range(XGridSize):
tmp = []
for y in range(YGridSize):
tmp.append(0)
spotsGrid.append(tmp)
def allSpotsDraw(dt):
global spotsGrid
for x in range(XGridSize):
for y in range(YGridSize):
if 0 < spotsGrid[x][y]:
spotDraw( x, y, spotsGrid[x][y] )
spotsGrid[x][y] = spotsGrid[x][y] - ( dt / SecPerTick / DelayGlow )
def calcLabel(px,py):
cxy = pix2cell(px,py)
if cxy is None:
t = "At pixel ( , )."
else:
t = "At pixel (%3d,%3d)."%( cxy[0], cxy[1] )
global label
label = pyglet.text.Label(
t,
font_name='Times New Roman',
font_size=18,
x=xpad, y=window.height-xpad,
anchor_x='left', anchor_y='top')
label = None
calcLabel(-1,-1)
labelGridSize = pyglet.text.Label(
"Each grid square is 8 by 8 pixels.",
font_name='Times New Roman',
font_size=18,
x=window.width-xpad, y=window.height-xpad,
anchor_x='right', anchor_y='top')
labelLitBlanked = pyglet.text.Label(
"Drag for lit line, click ends for blanked line.",
font_name='Times New Roman',
font_size=18,
x=window.width//2, y=window.height-xpad-30,
anchor_x='center', anchor_y='top')
labelSaving = pyglet.text.Label(
"Close window to enter name and save the glyph (or not).",
font_name='Times New Roman',
font_size=18,
x=window.width//2, y=xpad,
anchor_x='center', anchor_y='bottom')
def lineDraw( color, c0, c1 ):
pyglet.gl.glColor4f( *color )
pyglet.graphics.draw( 2, pyglet.gl.GL_LINES,
( 'v2i', cell2pix( *c0 ) + cell2pix( *c1 ) ) )
def litLineDraw( c0, c1 ):
lineDraw( ( 1, 0, 1, 1 ), c0, c1 )
def blankedLineDraw( c0, c1 ):
lineDraw( ( .5, .5, .5, .4 ), c0, c1 )
# This is the glyph so far
g = []
def gDraw():
lit = True
lastCell = None
for i in g:
if "on" == i:
lit = True
elif "off" == i:
lit = False
else:
if lastCell is None:
lastCell = i
else:
if lit:
litLineDraw( lastCell, i )
else:
blankedLineDraw( lastCell, i )
lastCell = i
pressXY = None
motionLine = None
dragLine = None
@window.event
def on_mouse_press( px, py, button, modifiers ):
tmp = pix2cell( px, py )
if tmp is None:
return
x = tmp[0]
y = tmp[1]
global pressXY
if pressXY is None:
pressXY = ( x, y )
elif pressXY[0] == x and pressXY[1] == y:
return
else:
g.append( "off" )
g.append( pressXY )
g.append( (x,y), )
pressXY = ( x, y )
@window.event
def on_mouse_release( px, py, button, modifiers ):
tmp = pix2cell( px, py )
if tmp is None:
return
x = tmp[0]
y = tmp[1]
global pressXY
if pressXY is None:
return
elif pressXY[0] == x and pressXY[1] == y:
return
else:
g.append( "on" )
g.append( pressXY )
g.append( (x,y), )
pressXY = ( x, y )
@window.event
def on_mouse_motion(x, y, dx, dy):
global motionLine
global dragLine
dragLine = None
calcLabel(x,y)
try:
cx, cy = pix2cell( x, y ) # on sep line for TypeError checking
motionLine = (g[-1], ( cx, cy ))
except (IndexError, TypeError):
motionLine = None
@window.event
def on_mouse_drag( x, y, dx, dy, buttons, modifiers ):
global dragLine
global motionLine
motionLine = None
calcLabel(x,y)
try:
cx, cy = pix2cell( x, y ) # on sep line for TypeError checking
dragLine = (g[-1], ( cx, cy ))
except (IndexError, TypeError):
dragLine = None
@window.event
def on_draw():
window.clear()
label.draw()
labelGridSize.draw()
labelLitBlanked.draw()
labelSaving.draw()
gridDraw()
#allSpotsDraw(0)
gDraw()
if motionLine is not None:
blankedLineDraw( *motionLine )
if dragLine is not None:
litLineDraw( *dragLine )
@window.event
def on_mouse_enter( x, y ):
pass
@window.event
def on_mouse_leave( x, y ):
    global pressXY, dragLine, motionLine
calcLabel(-1,-1)
pressXY = None
dragLine = None
motionLine = None
#pyglet.clock.schedule_interval(allSpotsDraw, SecPerTick)
pyglet.app.run()
try:
import readline
i = raw_input("Enter a name to save the glyph (leave blank to abandon): ")
if "" == i:
i = None
except EOFError:
i = None
if i is not None:
gs = glyphCreate( str(i), g )
glyphDump( gs )
wrn("Saved glyph \"%s\"."%(gs['name']))
else:
wrn("Not saving.")
|
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
import random
from fife import fife
from agent import Agent
from hero import Hero
from girl import Girl
from beekeeper import Beekeeper
from fife.extensions.fife_settings import Setting
#TDS = Setting(app_name="rio_de_hola")
_STATE_ATTACK, _STATE_RUNAWAY, _STATE_FLY, _STATE_HIT_BEE = xrange(4)
class Bee(Agent):
def __init__(self, settings, model, agentName, layer, uniqInMap=True):
super(Bee, self).__init__(settings, model, agentName, layer, uniqInMap)
self.state = _STATE_FLY
self.hero = self.layer.getInstance('PC')
self.girl = self.layer.getInstance('NPC:girl')
self.beekeeper = self.layer.getInstance('beekeeper')
def onInstanceActionFinished(self, instance, action):
self.start()
def onInstanceActionCancelled(self, instance, action):
pass
def start(self):
self.state = _STATE_FLY
self.facingLoc = self.agent.getLocation()
bl = self.facingLoc.getLayerCoordinates()
bl.x += random.randint(-5, 5)
bl.y += random.randint(-5, 5)
# reset the coordinates of bees
self.facingLoc.setLayerCoordinates(bl)
self.agent.move('fly', self.facingLoc , 4 * self.settings.get("rio", "TestAgentSpeed"))
gl, hl = self.getBoyGirlPosi(self.girl, self.hero)
self.runaway(bl, gl)
# self.attack(bl, gl, hl)
target = self.agent.getLocationRef()
self.hit_bee(target, bl, hl)
# get the coordinates of the boy and girl
def getBoyGirlPosi(self, name1, name2):
self.girlLoc = name1.getLocation()
gl = fife.Location(self.girlLoc).getExactLayerCoordinates()
self.heroLoc = name2.getLocation()
hl = self.heroLoc.getLayerCoordinates()
return gl,hl
def runaway(self, bl, gl):
self.state = _STATE_RUNAWAY
threshold = 100
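        # note: dist1 below is a squared distance, so a threshold of 100
        # corresponds to a radius of 10 layer units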
dist1 = (gl.x-bl.x)*(gl.x-bl.x)+(gl.y-bl.y)*(gl.y-bl.y)
if (dist1 < threshold):
bl.x += 40
bl.y += 40
self.facingLoc.setLayerCoordinates(bl)
self.agent.move('fly', self.facingLoc, 20 * self.settings.get("rio", "TestAgentSpeed"))
def attack(self, bl, gl, hl):
self.state = _STATE_ATTACK
threshold = 100
dist1 = (gl.x-bl.x)*(gl.x-bl.x)+(gl.y-bl.y)*(gl.y-bl.y)
dist2 = (hl.x-bl.x)*(hl.x-bl.x)+(hl.y-bl.y)*(hl.y-bl.y)
if (dist1 < threshold or dist2 < threshold):
self.facingLoc.setLayerCoordinates(bl)
self.agent.actOnce('attack', self.facingLoc, 20 * self.settings.get("rio", "TestAgentSpeed"))
def hit_bee(self,target, bl, hl):
self.state = _STATE_HIT_BEE
dist2 = (hl.x-bl.x)*(hl.x-bl.x)+(hl.y-bl.y)*(hl.y-bl.y)
threshold = 100
if (dist2 < threshold):
self.hero.actOnce('kick', target)
self.agent.actRepeat('fall')
            # the beekeeper will move to the bees' position and talk to them
self.facing = self.beekeeper.getLocation()
self.facing.setLayerCoordinates(bl)
self.beekeeper.move('walk',self.facing,10 * self.settings.get("rio", "TestAgentSpeed"))
#self.beekeeper.actOnce('talk',target)
|
#!/usr/bin/env python
##
## Cherokee 0.9.x to 0.10.x configuration converter
##
## Copyright: Alvaro Lopez Ortega <[email protected]>
## Licensed: GPL v2
##
import sys
def convert (fin, fout):
# Open file
f = open(fin, 'r')
lines = [x.strip() for x in f.readlines()]
f.close()
# Replace 'elapse' by 'lapse'
lines = map (lambda x: x.replace('!elapse', '!lapse'), lines)
# Replace duplicate mime entries
lines = map (lambda x: x.replace('midi,kar,mpga,mpega', 'midi,kar,mpega'), lines)
lines = map (lambda x: x.replace('mpeg3,mp2,dl', 'mpeg3,dl'), lines)
lines = map (lambda x: x.replace('gsm,m3u,wma,wax', 'gsm,wma,wax'), lines)
# Write it down
f = open (fout, 'w+')
f.write ('\n'.join(lines))
f.close()
def main ():
if len(sys.argv) < 3:
print "USAGE:"
print " %s /path/cherokee.conf.09 /path/cherokee.conf.010" % (sys.argv[0])
print
raise SystemExit
convert (sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
|
import pdb
import datetime
from django.db.models import Q
from sched_ev.models import Event, L_MONTH
from .config import TZ_LOCAL, site_names, end_of_month
def parse_period(period, month=False, date=False):
# get starting date
start_year = int(period[ 0: 4])
start_month = int(period[ 4: 6])
if date:
start_date = int(period[ 6: 8])
period = period[9:]
elif month:
start_date = 1
period = period[7:]
start = TZ_LOCAL.localize(datetime.datetime(start_year, start_month, start_date))
# get ending date, just before midnight
end_year = int(period[ 0: 4])
end_month = int(period[ 4: 6])
if date:
end_date = int(period[ 6: 8])
elif month:
end_date = end_of_month(end_year, end_month)
end = TZ_LOCAL.localize(datetime.datetime(end_year, end_month, end_date))
if date:
period_str = '{} {}, {}  -  {} {}, {}'.format(
L_MONTH[start_month-1][1], start_date, start_year,
L_MONTH[end_month -1][1], end_date , end_year)
elif month:
period_str = '{} {}  -  {} {}'.format(
L_MONTH[start_month-1][1], start_year,
L_MONTH[end_month -1][1], end_year)
return (start, end, period_str)
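# Accepted period formats, inferred from the slicing above (dates illustrative):
#   parse_period('202301-202303', month=True)     # Jan 2023 through Mar 2023
#   parse_period('20230101-20230115', date=True)  # Jan 1 through Jan 15, 2023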
def get_events(period, location, event_type):
start, end, period_str = parse_period(period, month=True)
    # flag a bad period (start after end)
    bad_period = start > end
# get events
q = Q(date_time__gte=start) & Q(date_time__lte=end) & Q(planned=True)
if location != '0':
location = int(location)
q &= Q(location=location)
if event_type != '0':
event_type = int(event_type)
q &= Q(event_type=int(event_type))
events = Event.objects.filter(q).order_by('date_time')
    if bad_period:
        period_str = 'bad period'
return (events, period_str)
|
import uuid
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from rest_framework.decorators import detail_route
from django.core.exceptions import ObjectDoesNotExist
from .models import *
import json
class NodeSerializer(serializers.ModelSerializer):
nodeName = serializers.CharField(source='user.username')
nodePassword = serializers.CharField(source='user.password', write_only=True)
class Meta:
        model = Node
        # password is the plain-text form of nodePassword, which is stored only as a hash.
fields = ('node_url', 'access_to_posts', 'access_to_images','password', 'user', 'username')
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password')
extra_kwargs = {
'password': {
'write_only': True
},
}
class AuthorInfoSerializer(serializers.ModelSerializer):
displayName = serializers.CharField(source='user.username')
first_name = serializers.CharField(source='user.first_name', allow_blank=True, required=False)
last_name = serializers.CharField(source='user.last_name', allow_blank=True, required=False)
email = serializers.CharField(source='user.email', allow_blank=True, required=False)
id = serializers.CharField(required=True)
class Meta:
model = Author
fields = ('id', 'displayName', 'first_name', 'last_name',
'email', 'bio', 'host', 'github', 'url')
class ForeignAuthorInfoSerializer(AuthorInfoSerializer):
    id = serializers.CharField(required=True)
class CommentSerializer(serializers.ModelSerializer):
author = AuthorInfoSerializer(many=False)
guid = serializers.CharField(source='id', required=False)
pubDate = serializers.DateTimeField(source='published', required=False)
class Meta:
model = Comment
fields = ('id', 'guid', 'comment', 'author', 'pubDate', 'published', 'post', 'contentType', 'foreign_post')
def create(self, validated_data):
foreign_posts = False
try: # if posting directly to /post/id/comments, there is id inside the url
print "GETTING PK FROM REQUEST"
postId = self.context['request'].parser_context.get('kwargs').get('pk')
except:
print "NO PUBLID ID FOUND FOR COMMENTS POST"
try: # If coming from /foreignposts, there is no id inside pk
postId = self.context['foreign_id']
print postId
foreign_post = True
except:
print "NO FOREIGN ID FOR COMMENTS POST"
return None
post = None
foreign_post = None
try:
# Case where we are making local post
post = Post.objects.get(id=postId)
except:
try:
# Case where we are making comments for a foreign post
print "TRYING TO GET FOREIGN POST FOR COMMENT"
print postId
foreign_post = ForeignPost.objects.get(id=postId)
except:
print "POST FOR COMMENT NOT FOUND"
return None
if foreign_post:
try: # if coming from /foreignposts, there will be no user inside
postId = self.context['foreign_id']
print "Creating post from /foreignposts"
author = validated_data.pop('author')
author_url = author.get('url')
try: # Get author
author = Author.objects.get(url=author_url)
except: # in the case where the author wasn't created because they haven't posted yet
user = author.pop('user')
author_id = author.pop('id')
user = User.objects.create(username=author.get('url') + "__" + user.get('username'))
author = Author.objects.create(id=author_id, user=user, **author)
# make comment
comment = Comment.objects.create(author=author, foreign_post=foreign_post, **validated_data)
comment.save()
return comment
except:
print "NOT COMING FROM /foreignposts"
pass
print "COMING FROM /POST/ID/COMMENTS"
author = Author.objects.get(user=self.context.get('request').user)
comment = Comment.objects.create(author=author, foreign_post=foreign_post, **validated_data)
comment.save()
return comment
print "POSTING LOCAL COMMENT"
author = Author.objects.get(user=self.context.get('request').user)
comment = Comment.objects.create(author=author, post=post, **validated_data)
comment.save()
return comment
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url', 'name')
class ForeignPostSerializer(serializers.ModelSerializer):
author = ForeignAuthorInfoSerializer(required=True, read_only=False)
id = serializers.CharField(required=True)
comments = serializers.SerializerMethodField('getComments')
class Meta:
model = ForeignPost
fields = ('id', 'title', 'source', 'author', 'origin', 'description', 'content',
'category', 'visibility', 'published','comments', 'contentType')
def create(self, validated_data):
# print "CREATING FOREIGN POST..."
origin = validated_data.get('origin')
content_type = validated_data.pop('contentType')
foreign_author = validated_data.pop('author')
foreign_user = foreign_author.pop('user')
url = foreign_author.get('url')
author = None
postId = validated_data.pop('id')
try:
print url
print foreign_user.get('username')
author = Author.objects.get(url = url)
except ObjectDoesNotExist:
if User.objects.all().filter(username = foreign_author.get('url') + "__" + foreign_user.get('username')).exists():
User.objects.get(username = foreign_author.get('url') + "__" + foreign_user.get('username')).delete()
user = User.objects.create(username = foreign_author.get('url') + "__" + foreign_user.get('username'))
author = Author.objects.create(user = user, **foreign_author)
user.save()
author.save()
if content_type == "type/x-markdown":
content_type = "type/markdown"
# print "SAVING FOREIGN POST..."
try:
post = ForeignPost.objects.get(id=postId)
except:
post = ForeignPost.objects.create(id=postId, author=author, contentType=content_type, **validated_data)
post.save()
return post
# Returns a list of comments
def getComments(self, obj):
commentsQuerySet = Comment.objects.all().filter(foreign_post__id=obj.id).order_by('published')[:5]
serializer = CommentSerializer(commentsQuerySet, many=True)
return serializer.data
class PostSerializer(serializers.ModelSerializer):
author = AuthorInfoSerializer(many = False, read_only = True)
comments = serializers.SerializerMethodField('getComments')
class Meta:
model = Post
fields = ('id', 'title', 'source', 'origin', 'description', 'content',
'category', 'author', 'visibility', 'published', 'contentType', 'comments')
def create(self, validated_data):
id = uuid.uuid4()
source = validated_data.pop('source')
if not source:
source = self.context.get('request').build_absolute_uri() + str(id)
origin = self.context.get('request').build_absolute_uri() +"/"+ str(id)
author = Author.objects.get(user=self.context.get('request').user)
post = Post.objects.create(id=id, origin=origin, source=source, author=author, **validated_data)
post.save()
return post
# Returns a list of comments
def getComments(self, obj):
commentsQuerySet = Comment.objects.all().filter(post__id=obj.id).order_by('published')[:5]
serializer = CommentSerializer(commentsQuerySet, many=True)
return serializer.data
class ImageSerializer(serializers.ModelSerializer):
user = AuthorInfoSerializer(many = False, read_only = True)
class Meta:
model = Image
fields = ('id', 'user', 'origin', 'photo')
def create(self, validated_data):
id = uuid.uuid4()
user = Author.objects.get(user=self.context.get('request').user)
origin = self.context.get('request').build_absolute_uri() + str(id)
image = Image.objects.create(id=id, user=user, origin=origin, photo=validated_data['photo'])
image.save()
return image
class AuthorSerializer(serializers.ModelSerializer):
"""
Serializer used for doing author profile related operations.
"""
displayName = serializers.CharField(source='user.username')
first_name = serializers.CharField(source='user.first_name', allow_blank=True, required=False)
last_name = serializers.CharField(source='user.last_name', allow_blank=True, required=False)
email = serializers.CharField(source='user.email', allow_blank=True, required=False)
password = serializers.CharField(source='user.password', write_only=True)
friends = AuthorInfoSerializer(many=True, required=False)
host = serializers.URLField(read_only=True)
url = serializers.URLField(read_only=True)
request_sent = AuthorInfoSerializer(source='get_request_sent', many=True, read_only=True)
request_received = AuthorInfoSerializer(source='get_request_received', many=True, read_only=True)
is_active = serializers.BooleanField(source='user.is_active', read_only=True)
class Meta:
model = Author
fields = ('id', 'displayName', 'password', 'first_name', 'last_name',
'email', 'bio', 'host', 'url', 'github', 'friends', 'request_sent', 'request_received', 'is_active')
    # The User object must be created separately since it is nested inside Author.
    # Returns an Author object with the User object as a field after extracting the data from the JSON.
def create(self, validated_data):
id = uuid.uuid4()
host = "http://"+self.context.get('request').get_host()+"/"
url = self.context.get('request').build_absolute_uri() + str(id) + '/'
user_data = validated_data.pop('user')
user_object = User.objects.create_user(is_active=False, **user_data)
author = Author.objects.create(id=id, user=user_object, host=host, url=url, **validated_data)
author.save()
return author
    # For updating an author profile, we need to extract the user from the data and handle the nested object as well.
def update(self, author, validated_data):
user_data = validated_data.pop('user')
user = author.user
user.username=user_data.get('username', user.username)
user.password=author.user.password
user.first_name=user_data.get('first_name', user.first_name)
user.last_name=user_data.get('last_name', user.last_name)
user.email=user_data.get('email', user.email)
user.save()
author.bio = validated_data.get('bio', author.bio)
author.github = validated_data.get('github', author.github)
author.save()
return author
class FriendsWithSerializer(serializers.ModelSerializer):
"""
Serializer used for doing friend related operations.
"""
authors = serializers.SerializerMethodField('getFriends')
class Meta:
model = Author
fields = ['authors']
    # Returns a list of friend ids for an author.
    def getFriends(self, obj):
        query = obj.friends.all().values('id')
        res = []
        for item in query:
            res.append(item['id'])
        return res
|
#!/usr/bin/env python
#
# Bootloader for the Swift Navigation Piksi GPS Receiver
#
# Copyright (C) 2010 Gareth McMullin <[email protected]>
# Copyright (C) 2011 Piotr Esden-Tempski <[email protected]>
# Copyright (C) 2013-2014 Swift Navigation Inc <www.swift-nav.com>
#
# Contacts: Colin Beighley <[email protected]>
# Fergus Noble <[email protected]>
#
# Based on luftboot, a bootloader for the Paparazzi UAV project.
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
The :mod:`piksi_tools.bootload` module contains functions loading firmware
images.
"""
import time
import struct
import sys
import serial_link
from sbp.bootload import *
from sbp.deprecated import *
from sbp.logging import *
from sbp.piksi import *
from sbp.client.handler import Handler
class Bootloader():
"""
Bootloader
The :class:`Bootloader` loads firmware.
"""
def __init__(self, link):
self.stopped = False
self.handshake_received = False
self.version = None
# SBP version is unset in older devices.
self.sbp_version = (0, 0)
self.link = link
self.link.add_callback(self._deprecated_callback, SBP_MSG_BOOTLOADER_HANDSHAKE_DEPRECATED)
self.link.add_callback(self._handshake_callback, SBP_MSG_BOOTLOADER_HANDSHAKE_DEVICE)
def __enter__(self):
return self
def __exit__(self, *args):
if not self.stopped:
self.stop()
def stop(self):
self.stopped = True
self.link.remove_callback(self._deprecated_callback, SBP_MSG_BOOTLOADER_HANDSHAKE_DEPRECATED)
self.link.remove_callback(self._handshake_callback, SBP_MSG_BOOTLOADER_HANDSHAKE_DEVICE)
def _deprecated_callback(self, sbp_msg):
        if len(sbp_msg.payload)==1 and struct.unpack('B', sbp_msg.payload[0])[0]==0:
# == v0.1 of the bootloader, returns hardcoded version number 0.
self.version = "v0.1"
else:
# > v0.1 of the bootloader, returns git commit string.
self.version = sbp_msg.payload[:]
self.handshake_received = True
def _handshake_callback(self, sbp_msg):
self.version = sbp_msg.version
self.sbp_version = ((sbp_msg.flags >> 8) & 0xF, sbp_msg.flags & 0xF)
self.handshake_received = True
def wait_for_handshake(self, timeout=None):
if timeout is not None:
t0 = time.time()
self.handshake_received = False
while not self.handshake_received:
time.sleep(0.1)
if timeout is not None:
                if time.time() - t0 > timeout:
return False
return True
def reply_handshake(self):
# < 0.45 of SBP protocol, reuse single handshake message.
if self.sbp_version < (0, 45):
self.link.send(SBP_MSG_BOOTLOADER_HANDSHAKE_DEPRECATED, '\x00')
else:
self.link.send(SBP_MSG_BOOTLOADER_HANDSHAKE_HOST, '\x00')
def jump_to_app(self):
self.link.send(SBP_MSG_BOOTLOADER_JUMP_TO_APP, '\x00')
def get_args():
"""
Get and parse arguments.
"""
import argparse
parser = argparse.ArgumentParser(description='Piksi Bootloader')
parser.add_argument("file",
help="the Intel hex file to write to flash.")
parser.add_argument('-m', '--m25',
help='write the file to the M25 (FPGA) flash.',
action="store_true")
parser.add_argument('-s', '--stm',
help='write the file to the STM flash.',
action="store_true")
parser.add_argument('-e', '--erase',
help='erase sectors 1-11 of the STM flash.',
action="store_true")
parser.add_argument('-p', '--port',
default=[serial_link.SERIAL_PORT], nargs=1,
help='specify the serial port to use.')
parser.add_argument("-b", "--baud",
default=[serial_link.SERIAL_BAUD], nargs=1,
help="specify the baud rate to use.")
parser.add_argument("-f", "--ftdi",
help="use pylibftdi instead of pyserial.",
action="store_true")
args = parser.parse_args()
if args.stm and args.m25:
parser.error("Only one of -s or -m options may be chosen")
sys.exit(2)
elif not args.stm and not args.m25:
parser.error("One of -s or -m options must be chosen")
sys.exit(2)
elif args.erase and not args.stm:
parser.error("The -e option requires the -s option to also be chosen")
sys.exit(2)
return args
def main():
"""
Get configuration, get driver, and build handler and start it.
"""
args = get_args()
port = args.port[0]
baud = args.baud[0]
use_ftdi = args.ftdi
use_m25 = args.m25
use_stm = args.stm
erase = args.erase
# Driver with context
with serial_link.get_driver(use_ftdi, port, baud) as driver:
# Handler with context
with Handler(driver.read, driver.write) as link:
link.start()
link.send(SBP_MSG_RESET, "")
time.sleep(0.2)
link.add_callback(serial_link.printer, SBP_MSG_PRINT)
# Tell Bootloader we want to write to the flash.
with Bootloader(link) as piksi_bootloader:
print "Waiting for bootloader handshake message from Piksi ...",
sys.stdout.flush()
try:
piksi_bootloader.wait_for_handshake()
except KeyboardInterrupt:
return
piksi_bootloader.reply_handshake()
print "received."
print "Piksi Onboard Bootloader Version:", piksi_bootloader.version
if piksi_bootloader.sbp_version > (0, 0):
print "Piksi Onboard SBP Protocol Version:", piksi_bootloader.sbp_version
# Catch all other errors and exit cleanly.
try:
import flash
with flash.Flash(link, flash_type=("STM" if use_stm else "M25"),
sbp_version=piksi_bootloader.sbp_version) as piksi_flash:
if erase:
for s in range(1,12):
print "\rErasing STM Sector", s,
sys.stdout.flush()
piksi_flash.erase_sector(s)
print
from intelhex import IntelHex
ihx = IntelHex(args.file)
piksi_flash.write_ihx(ihx, sys.stdout, mod_print=0x10)
print "Bootloader jumping to application"
piksi_bootloader.jump_to_app()
except:
import traceback
traceback.print_exc()
if __name__ == "__main__":
main()
|
# coding=utf-8
import unittest
"""973. K Closest Points to Origin
https://leetcode.com/problems/k-closest-points-to-origin/description/
We have a list of `points` on the plane. Find the `K` closest points to the
origin `(0, 0)`.
(Here, the distance between two points on a plane is the Euclidean distance.)
You may return the answer in any order. The answer is guaranteed to be unique
(except for the order that it is in.)
**Example 1:**
**Input:** points = [[1,3],[-2,2]], K = 1
**Output:** [[-2,2]]
**Explanation:**
The distance between (1, 3) and the origin is sqrt(10).
The distance between (-2, 2) and the origin is sqrt(8).
Since sqrt(8) < sqrt(10), (-2, 2) is closer to the origin.
We only want the closest K = 1 points from the origin, so the answer is just [[-2,2]].
**Example 2:**
**Input:** points = [[3,3],[5,-1],[-2,4]], K = 2
**Output:** [[3,3],[-2,4]]
(The answer [[-2,4],[3,3]] would also be accepted.)
**Note:**
1. `1 <= K <= points.length <= 10000`
2. `-10000 < points[i][0] < 10000`
3. `-10000 < points[i][1] < 10000`
Similar Questions:
Kth Largest Element in an Array (kth-largest-element-in-an-array)
Top K Frequent Elements (top-k-frequent-elements)
Top K Frequent Words (top-k-frequent-words)
"""
class Solution(object):
    def kClosest(self, points, K):
        """
        :type points: List[List[int]]
        :type K: int
        :rtype: List[List[int]]
        """
        # Sort by squared distance to the origin and keep the first K points.
        return sorted(points, key=lambda p: p[0] * p[0] + p[1] * p[1])[:K]
class T(unittest.TestCase):
    def test(self):
        # Example 1 from the problem statement above.
        self.assertEqual(Solution().kClosest([[1, 3], [-2, 2]], 1), [[-2, 2]])
if __name__ == "__main__":
unittest.main()
|
############################################################################
# This file is part of the Maui Web site.
#
# Copyright (c) 2012 Pier Luigi Fiorini
# Copyright (c) 2009-2010 Krzysztof Grodzicki
#
# Author(s):
# Pier Luigi Fiorini <[email protected]>
#
# $BEGIN_LICENSE:AGPL3+$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $END_LICENSE$
############################################################################
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.contrib.comments.moderation import CommentModerator, moderator
from taggit.managers import TaggableManager
from apps.news.managers import PublishedNewsManager
class Category(models.Model):
name = models.CharField(max_length=255)
def __unicode__(self):
return u"%s" % self.name
class Meta:
verbose_name = _("Category")
verbose_name_plural = _("Categories")
class News(models.Model):
user = models.ForeignKey(User)
site = models.ManyToManyField(Site)
title = models.CharField(max_length=255)
category = models.ForeignKey(Category)
slug = models.SlugField(unique=True)
body = models.TextField()
published = models.BooleanField(default=False)
comments_enabled = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
pubs_objects = PublishedNewsManager()
tags = TaggableManager()
def __unicode__(self):
return u"%s" % self.title
def get_absolute_url(self):
return reverse("news_details", kwargs={"slug": self.slug})
class Meta:
verbose_name = _("News")
verbose_name_plural = _("News")
ordering = ["-created_at"]
class NewsCommentModerator(CommentModerator):
"""
News comments moderator.
The following logic applies:
- unauthenticated users' comments are always moderated
and an email is sent to the administrators
- respect the ``comments_enabled`` flag on news
    - comments are closed 30 days after the news item was
      created
"""
email_notification = True
auto_close_field = "created_at"
close_after = 30
enable_field = "comments_enabled"
def email(self, comment, content_object, request):
if not request.user.is_authenticated():
return True
return False
def moderate(self, comment, content_object, request):
if not request.user.is_authenticated():
return True
return False
moderator.register(News, NewsCommentModerator)
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
from itertools import ifilter
from swift.common import bufferedhttp
from swift.common import exceptions
from swift.common import http
class Sender(object):
"""
Sends SSYNC requests to the object server.
These requests are eventually handled by
:py:mod:`.ssync_receiver` and full documentation about the
process is there.
"""
def __init__(self, daemon, node, job, suffixes, remote_check_objs=None):
self.daemon = daemon
self.df_mgr = self.daemon._diskfile_mgr
self.node = node
self.job = job
self.suffixes = suffixes
self.connection = None
self.response = None
self.response_buffer = ''
self.response_chunk_left = 0
# available_map has an entry for each object in given suffixes that
# is available to be sync'd; each entry is a hash => timestamp
self.available_map = {}
        # When remote_check_objs is given, ssync_sender only tries to
        # verify whether those objects exist on the remote node.
self.remote_check_objs = remote_check_objs
# send_list has an entry for each object that the receiver wants to
# be sync'ed; each entry is an object hash
self.send_list = []
self.failures = 0
def __call__(self):
"""
Perform ssync with remote node.
:returns: a 2-tuple, in the form (success, can_delete_objs) where
success is a boolean and can_delete_objs is the map of
objects that are in sync with the receiver. Each entry in
can_delete_objs maps a hash => timestamp
"""
if not self.suffixes:
return True, {}
try:
# Double try blocks in case our main error handler fails.
try:
# The general theme for these functions is that they should
# raise exceptions.MessageTimeout for client timeouts and
# exceptions.ReplicationException for common issues that will
# abort the replication attempt and log a simple error. All
# other exceptions will be logged with a full stack trace.
self.connect()
self.missing_check()
if self.remote_check_objs is None:
self.updates()
can_delete_obj = self.available_map
else:
# when we are initialized with remote_check_objs we don't
# *send* any requested updates; instead we only collect
# what's already in sync and safe for deletion
in_sync_hashes = (set(self.available_map.keys()) -
set(self.send_list))
can_delete_obj = dict((hash_, self.available_map[hash_])
for hash_ in in_sync_hashes)
if not self.failures:
return True, can_delete_obj
else:
return False, {}
except (exceptions.MessageTimeout,
exceptions.ReplicationException) as err:
self.daemon.logger.error(
'%s:%s/%s/%s %s', self.node.get('replication_ip'),
self.node.get('replication_port'), self.node.get('device'),
self.job.get('partition'), err)
except Exception:
# We don't want any exceptions to escape our code and possibly
# mess up the original replicator code that called us since it
# was originally written to shell out to rsync which would do
# no such thing.
self.daemon.logger.exception(
'%s:%s/%s/%s EXCEPTION in replication.Sender',
self.node.get('replication_ip'),
self.node.get('replication_port'),
self.node.get('device'), self.job.get('partition'))
finally:
self.disconnect()
except Exception:
# We don't want any exceptions to escape our code and possibly
# mess up the original replicator code that called us since it
# was originally written to shell out to rsync which would do
# no such thing.
# This particular exception handler does the minimal amount as it
# would only get called if the above except Exception handler
# failed (bad node or job data).
self.daemon.logger.exception('EXCEPTION in replication.Sender')
return False, {}
def connect(self):
"""
Establishes a connection and starts an SSYNC request
with the object server.
"""
with exceptions.MessageTimeout(
self.daemon.conn_timeout, 'connect send'):
self.connection = bufferedhttp.BufferedHTTPConnection(
'%s:%s' % (self.node['replication_ip'],
self.node['replication_port']))
self.connection.putrequest('SSYNC', '/%s/%s' % (
self.node['device'], self.job['partition']))
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Storage-Policy-Index',
int(self.job['policy']))
# a sync job must use the node's index for the frag_index of the
# rebuilt fragments instead of the frag_index from the job which
# will be rebuilding them
frag_index = self.node.get('index', self.job.get('frag_index'))
if frag_index is None:
# replication jobs will not have a frag_index key;
# reconstructor jobs with only tombstones will have a
# frag_index key explicitly set to the value of None - in both
# cases on the wire we write the empty string which
# ssync_receiver will translate to None
frag_index = ''
self.connection.putheader('X-Backend-Ssync-Frag-Index',
frag_index)
# a revert job to a handoff will not have a node index
self.connection.putheader('X-Backend-Ssync-Node-Index',
self.node.get('index', ''))
self.connection.endheaders()
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'connect receive'):
self.response = self.connection.getresponse()
if self.response.status != http.HTTP_OK:
err_msg = self.response.read()[:1024]
raise exceptions.ReplicationException(
'Expected status %s; got %s (%s)' %
(http.HTTP_OK, self.response.status, err_msg))
def readline(self):
"""
Reads a line from the SSYNC response body.
httplib has no readline and will block on read(x) until x is
read, so we have to do the work ourselves. A bit of this is
taken from Python's httplib itself.
"""
data = self.response_buffer
self.response_buffer = ''
while '\n' not in data and len(data) < self.daemon.network_chunk_size:
if self.response_chunk_left == -1: # EOF-already indicator
break
if self.response_chunk_left == 0:
line = self.response.fp.readline()
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
self.response_chunk_left = int(line.strip(), 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.response.close()
raise exceptions.ReplicationException('Early disconnect')
if self.response_chunk_left == 0:
self.response_chunk_left = -1
break
chunk = self.response.fp.read(min(
self.response_chunk_left,
self.daemon.network_chunk_size - len(data)))
if not chunk:
# close the connection as protocol synchronisation is
# probably lost
self.response.close()
raise exceptions.ReplicationException('Early disconnect')
self.response_chunk_left -= len(chunk)
if self.response_chunk_left == 0:
self.response.fp.read(2) # discard the trailing \r\n
data += chunk
if '\n' in data:
data, self.response_buffer = data.split('\n', 1)
data += '\n'
return data
def missing_check(self):
"""
Handles the sender-side of the MISSING_CHECK step of a
SSYNC request.
Full documentation of this can be found at
:py:meth:`.Receiver.missing_check`.
"""
# First, send our list.
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'missing_check start'):
msg = ':MISSING_CHECK: START\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
hash_gen = self.df_mgr.yield_hashes(
self.job['device'], self.job['partition'],
self.job['policy'], self.suffixes,
frag_index=self.job.get('frag_index'))
if self.remote_check_objs is not None:
hash_gen = ifilter(
lambda path_objhash_timestamp:
path_objhash_timestamp[1] in
self.remote_check_objs, hash_gen)
for path, object_hash, timestamp in hash_gen:
self.available_map[object_hash] = timestamp
with exceptions.MessageTimeout(
self.daemon.node_timeout,
'missing_check send line'):
msg = '%s %s\r\n' % (
urllib.quote(object_hash),
urllib.quote(timestamp))
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'missing_check end'):
msg = ':MISSING_CHECK: END\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
# Now, retrieve the list of what they want.
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'missing_check start wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':MISSING_CHECK: START':
break
elif line:
raise exceptions.ReplicationException(
'Unexpected response: %r' % line[:1024])
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'missing_check line wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':MISSING_CHECK: END':
break
parts = line.split()
if parts:
self.send_list.append(parts[0])
def updates(self):
"""
Handles the sender-side of the UPDATES step of an SSYNC
request.
Full documentation of this can be found at
:py:meth:`.Receiver.updates`.
"""
# First, send all our subrequests based on the send_list.
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'updates start'):
msg = ':UPDATES: START\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
for object_hash in self.send_list:
try:
df = self.df_mgr.get_diskfile_from_hash(
self.job['device'], self.job['partition'], object_hash,
self.job['policy'], frag_index=self.job.get('frag_index'))
except exceptions.DiskFileNotExist:
continue
url_path = urllib.quote(
'/%s/%s/%s' % (df.account, df.container, df.obj))
try:
df.open()
# EC reconstructor may have passed a callback to build
# an alternative diskfile...
df = self.job.get('sync_diskfile_builder', lambda *args: df)(
self.job, self.node, df.get_metadata())
except exceptions.DiskFileDeleted as err:
self.send_delete(url_path, err.timestamp)
except exceptions.DiskFileError:
pass
else:
self.send_put(url_path, df)
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'updates end'):
msg = ':UPDATES: END\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
# Now, read their response for any issues.
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'updates start wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':UPDATES: START':
break
elif line:
raise exceptions.ReplicationException(
'Unexpected response: %r' % line[:1024])
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'updates line wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':UPDATES: END':
break
elif line:
raise exceptions.ReplicationException(
'Unexpected response: %r' % line[:1024])
def send_delete(self, url_path, timestamp):
"""
Sends a DELETE subrequest with the given information.
"""
msg = ['DELETE ' + url_path, 'X-Timestamp: ' + timestamp.internal]
msg = '\r\n'.join(msg) + '\r\n\r\n'
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'send_delete'):
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
def send_put(self, url_path, df):
"""
Sends a PUT subrequest for the url_path using the source df
(DiskFile) and content_length.
"""
msg = ['PUT ' + url_path, 'Content-Length: ' + str(df.content_length)]
# Sorted to make it easier to test.
for key, value in sorted(df.get_metadata().items()):
if key not in ('name', 'Content-Length'):
msg.append('%s: %s' % (key, value))
msg = '\r\n'.join(msg) + '\r\n\r\n'
with exceptions.MessageTimeout(self.daemon.node_timeout, 'send_put'):
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
for chunk in df.reader():
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'send_put chunk'):
self.connection.send('%x\r\n%s\r\n' % (len(chunk), chunk))
def disconnect(self):
"""
Closes down the connection to the object server once done
with the SSYNC request.
"""
if not self.connection:
return
try:
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'disconnect'):
self.connection.send('0\r\n\r\n')
except (Exception, exceptions.Timeout):
pass # We're okay with the above failing.
self.connection.close()
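# Illustrative sketch (not part of the Sender API): the chunked transfer-encoding
# framing used by the connection.send() calls above. Every message is wrapped as
# "<hex length>\r\n<payload>\r\n", and the request body is terminated with the
# zero-length chunk that disconnect() sends.
def _example_chunk_frames():
    msgs = [':MISSING_CHECK: START\r\n', ':MISSING_CHECK: END\r\n']
    frames = ['%x\r\n%s\r\n' % (len(msg), msg) for msg in msgs]
    frames.append('0\r\n\r\n')
    return ''.join(frames)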
|
# -*- coding: utf-8 -*-
from django.db import models
from django.template.defaultfilters import slugify
from django.conf import settings
from disciplinas.models import Disciplina
from empresa.models import Empresa
from departamentos.models import Departamento
from semestre.models import Semestre
from projetos.models import ProjetoDeGraduacao
from salas.models import Sala
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
class Banca(models.Model):
hora_inicial = models.TimeField( null=True, blank = True)
hora_final = models.TimeField( null=True, blank = True)
data = models.DateField(null = True, blank = True, verbose_name = "Data")
semestre = models.ForeignKey(Semestre,verbose_name = 'Semestre', related_name='banca_semestre', null=True, blank = True)
banca_docente= models.ForeignKey(settings.AUTH_USER_MODEL,related_name= 'banca1',limit_choices_to={'docente': True}, null=True, blank=True)
banca_convidado = models.ForeignKey(settings.AUTH_USER_MODEL,related_name= 'banca2',limit_choices_to=Q(docente = True) | Q(doutorando = True) | Q(mestrando = True), null=True, blank=True)
sala = models.ForeignKey(Sala, null=True, blank = True)
reservada = models.BooleanField()
alteravel = models.BooleanField(default = True)
projeto = models.ForeignKey(ProjetoDeGraduacao, related_name = u'banca_projeto', null = True, blank = True)
cancelada = models.BooleanField(default = False)
def __unicode__(self):
if self.reservada:
reservada = 'Reservada'
else:
reservada = ''
return unicode(self.sala)+ ', Dia: '+str(self.data)+u' Horário: '+ str(self.hora_inicial)+' - '+str(self.hora_final)+ ' '+reservada
class Agenda(models.Model):
class Meta:
verbose_name = _(u'Agendamento')
verbose_name_plural = _(u'Agendamento')
semestre = models.ForeignKey(Semestre, related_name='agenda_departamento')
salas = models.ForeignKey(Sala, related_name= 'agenda_sala',limit_choices_to={'ativa': True})
def __unicode__(self):
return u'Sala: '+unicode(self.salas.nome) + u' - ' +unicode(self.semestre)
DIAS_DA_SEMANA = (
('seg','Segunda-feira'),
('ter','Terça-feira'),
('qua','Quarta-feira'),
('qui','Quinta-feira'),
('sex','Sexta-feira'),
('sab','Sabado'),
)
class Dia_Agenda(models.Model):
class Meta:
verbose_name = _(u'Dia agendamento')
verbose_name_plural = _(u'Dias agendamento')
agenda = models.ForeignKey(Agenda)
data = models.DateField(null = True, blank = True, verbose_name = "Data")
horarios = models.CharField(max_length = 255,help_text = "Exemplo: 8:00-8:40;8:40-9:20;9:20-10:00;10:00-10:40;10:40-11:20;11:20-12:00;12:00-12:40;12:40-13:20;13:20-14:00;14:00-14:40;14:40-15:20;15:20-16:00;16:00-16:40;16:40-17:20;17:20-18:00;19:00-19:40;19:40-20:20;20:20-21:00",null = True, blank = True)
class Apresentacao(models.Model):
data = models.DateTimeField(verbose_name='Data da apresentação')
sala = models.CharField(max_length=80,verbose_name='Sala')
usuario = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='Usuário')
banca = models.ForeignKey(Banca, verbose_name='Banca')
|
from core.himesis import Himesis
import uuid
class HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule ExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans, self).__init__(name='HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """ExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'ExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class State() node
self.add_node()
self.vs[3]["mm__"] = """State"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class State()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class ExitPoint() node
self.add_node()
self.vs[5]["mm__"] = """ExitPoint"""
self.vs[5]["attr1"] = """+"""
# match_contains node for class ExitPoint()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# apply class LocalDef() node
self.add_node()
self.vs[7]["mm__"] = """LocalDef"""
self.vs[7]["attr1"] = """1"""
# apply_contains node for class LocalDef()
self.add_node()
self.vs[8]["mm__"] = """apply_contains"""
# apply class ProcDef() node
self.add_node()
self.vs[9]["mm__"] = """ProcDef"""
self.vs[9]["attr1"] = """1"""
# apply_contains node for class ProcDef()
self.add_node()
self.vs[10]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[11]["mm__"] = """Name"""
self.vs[11]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[12]["mm__"] = """apply_contains"""
# apply class Par() node
self.add_node()
self.vs[13]["mm__"] = """Par"""
self.vs[13]["attr1"] = """1"""
# apply_contains node for class Par()
self.add_node()
self.vs[14]["mm__"] = """apply_contains"""
# apply class Trigger() node
self.add_node()
self.vs[15]["mm__"] = """Trigger"""
self.vs[15]["attr1"] = """1"""
# apply_contains node for class Trigger()
self.add_node()
self.vs[16]["mm__"] = """apply_contains"""
# match association State--exitPoints-->ExitPoint node
self.add_node()
self.vs[17]["attr1"] = """exitPoints"""
self.vs[17]["mm__"] = """directLink_S"""
# apply association LocalDef--def-->ProcDef node
self.add_node()
self.vs[18]["attr1"] = """def"""
self.vs[18]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[19]["attr1"] = """channelNames"""
self.vs[19]["mm__"] = """directLink_T"""
# apply association ProcDef--p-->Par node
self.add_node()
self.vs[20]["attr1"] = """p"""
self.vs[20]["mm__"] = """directLink_T"""
# apply association Par--p-->Trigger node
self.add_node()
self.vs[21]["attr1"] = """p"""
self.vs[21]["mm__"] = """directLink_T"""
# backward association State---->LocalDef node
self.add_node()
self.vs[22]["mm__"] = """backward_link"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class State()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class ExitPoint()
(1,8), # applymodel -> apply_contains
(8,7), # apply_contains -> apply_class LocalDef()
(1,10), # applymodel -> apply_contains
(10,9), # apply_contains -> apply_class ProcDef()
(1,12), # applymodel -> apply_contains
(12,11), # apply_contains -> apply_class Name()
(1,14), # applymodel -> apply_contains
(14,13), # apply_contains -> apply_class Par()
(1,16), # applymodel -> apply_contains
(16,15), # apply_contains -> apply_class Trigger()
(3,17), # match_class State() -> association exitPoints
(17,5), # association exitPoints -> match_class ExitPoint()
(7,18), # apply_class LocalDef() -> association def
(18,9), # association def -> apply_class ProcDef()
(9,19), # apply_class ProcDef() -> association channelNames
(19,11), # association channelNames -> apply_class Name()
(9,20), # apply_class ProcDef() -> association p
(20,13), # association p -> apply_class Par()
(13,21), # apply_class Par() -> association p
(21,15), # association p -> apply_class Trigger()
(7,22), # apply_class LocalDef() -> backward_association
            (22,3), # backward_association -> match_class State()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((3,'isComposite'),('constant','true')), ((7,'__ApplyAttribute'),('constant','localdefcompstate')), ((9,'name'),('concat',(('constant','B'),(5,'name')))), ((11,'literal'),('constant','sh_in')), ((13,'__ApplyAttribute'),('constant','parexitpoint')), ((15,'channel'),('constant','sh_in')), ]
|
# © Christian Sommerfeldt Øien
# All rights reserved
class Pause:
def __init__(self, d):
self.span = d
class Note(Pause):
def __init__(self, d, i, p):
Pause.__init__(self, d)
self.duration = self.span
self.label = i
self.params = p
def __call__(self):
return (self.duration, self.label, self.params)
class ImpliedDurationNote(Note):
def __call__(self):
return (self.label, self.params)
class CompositionFilter:
def __init__(self, i, p):
self.label = i
self.params = p
def __call__(self, duration):
return (duration, self.label, self.params)
class NoteComposition(Note):
def __init__(self):
Pause.__init__(self, 0)
self.score = []
self.filters = []
def sequence(self, t, notes):
r = []
for n in notes:
n.time = t
if isinstance(n, Note):
r.append(n)
t += n.span
if self.span < t:
self.span = t
self.score.extend(r)
def __call__(self):
r = [[f(self.span) for f in self.filters]]
for note in self.score:
r.append((note.time, note()))
# improve: collapse if note is NoteComposition with
# no filters, onto self.score offset by note.time
# (but wait with this; optimization)
return r
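# A small usage sketch; the instrument labels and parameters below are made up.
# sequence() lays notes out along a time axis and grows the composition's span,
# and calling the composition renders a list whose first element holds the
# rendered filters and whose remaining elements are (start_time, note_tuple) pairs.
def _example_composition():
    comp = NoteComposition()
    comp.filters.append(CompositionFilter("reverb", [0.3]))
    comp.sequence(0, [Note(1, "sine", [440]), Pause(0.5), Note(1, "sine", [660])])
    return comp()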
|
# -*- coding: utf-8 -*-
from openerp import netsvc
from openerp.osv import osv, fields
class reevaluate_wizard(osv.osv_memory):
_name = 'kinesis_athletics.reevaluate.wizard'
_description = 'Wizard'
def confirm(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context=context)
active_ids=context.get('active_ids', False)
evaluation_obj = self.pool['kinesis_athletics.evaluation']
evaluation_ids = evaluation_obj.browse(cr, uid, active_ids, context=context)
new_evaluation_ids = []
for evaluation in evaluation_ids:
evaluation_id = evaluation_obj.new_evaluation(cr, uid, [evaluation.id], context=context)
new_evaluation_ids.append(evaluation_id['res_id'])
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
action_evaluation = mod_obj.get_object_reference(cr, uid, 'kinesis_athletics', 'action_kinesis_athletics_evaluation_evaluations')
action_evaluation_id = action_evaluation and action_evaluation[1] or False
action_evaluation = act_obj.read(cr, uid, [action_evaluation_id], context=context)[0]
action_evaluation['domain'] = [('id','in',new_evaluation_ids)]
return action_evaluation
|
################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <[email protected]>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Coderwall(Platform):
"""A <Platform> object for Coderwall"""
def __init__(self):
self.platformName = "Coderwall"
self.tags = ["development", "education"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "https://coderwall.com/" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
        # Regular expressions that a query must match to be processed in each mode
self.validQuery = {}
# The regular expression '.+' will match any query
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
        # Strings that, if present in the response, imply that the queried profile does not exist
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ["<title>404 : Unable to handle that url</title>"]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
#self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
        # This attribute will be populated when the program runs.
self.foundFields = {}
|
# -*- coding: utf-8 -*-
import salt.client
import pprint
import os
import sys
import logging
import salt.utils
import salt.utils.master
log = logging.getLogger(__name__)
"""
The original purpose of this runner is to verify that proceeding with an
upgrade is safe, that is, that all expected processes are running.
A secondary purpose is to serve as a utility for checking the current state of all processes.
"""
def check(cluster='ceph', roles=[], tolerate_down=0, verbose=True):
"""
Query the status of running processes for each role. Also, verify that
all minions assigned roles do respond. Return False if any fail.
"""
search = "I@cluster:{}".format(cluster)
if not roles:
roles = _cached_roles(search)
status = _status(search, roles, verbose)
log.debug("roles: {}".format(pprint.pformat(roles)))
log.debug("status: {}".format(pprint.pformat(status)))
ret = True
for role in status.keys():
for minion in status[role]:
if status[role][minion] is False:
if tolerate_down == 0:
log.error("ERROR: {} process on {} is not running".format(role, minion))
ret = False
tolerate_down -= 1
return ret
def mon(cluster='ceph'):
"""
Query all monitors. If any are running, assume cluster is running and
    return True. The purpose of this function is to act as a conditional
    that determines whether minion steps should happen serially or in parallel.
"""
status = _status("I@cluster:{}".format(cluster), ['mon'], False)
for minion in status['mon']:
if status['mon'][minion]:
return True
return False
def _status(search, roles, verbose):
"""
Return a structure of roles with module results
"""
# When search matches no minions, salt prints to stdout. Suppress stdout.
_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
status = {}
local = salt.client.LocalClient()
for role in roles:
role_search = search + " and I@roles:{}".format(role)
status[role] = local.cmd(role_search,
'cephprocesses.check',
roles=roles,
verbose=verbose,
expr_form="compound")
sys.stdout = _stdout
log.debug(pprint.pformat(status))
return status
def _cached_roles(search):
"""
Return the cached roles in a convenient structure. Trust the cached
values from the master pillar since a downed minion will be absent
from any dynamic query. Also, do not worry about downed minions that
are outside of the search criteria.
"""
pillar_util = salt.utils.master.MasterPillarUtil(search, "compound",
use_cached_grains=True,
grains_fallback=False,
opts=__opts__)
cached = pillar_util.get_minion_pillar()
roles = {}
for minion in cached.keys():
if 'roles' in cached[minion]:
for role in cached[minion]['roles']:
roles.setdefault(role, []).append(minion)
log.debug(pprint.pformat(roles))
return roles.keys()
def wait(cluster='ceph', **kwargs):
"""
Wait for all processes to be up or until the timeout expires.
"""
settings = {
'timeout': _timeout(cluster=cluster),
'delay': 3
}
settings.update(kwargs)
search = "I@cluster:{}".format(cluster)
_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
status = {}
local = salt.client.LocalClient()
status = local.cmd(search,
'cephprocesses.wait',
['timeout={}'.format(settings['timeout']),
'delay={}'.format(settings['delay'])],
expr_form="compound")
sys.stdout = _stdout
log.debug("status: ".format(pprint.pformat(status)))
if False in status.values():
for minion in status.keys():
if status[minion] is False:
log.error("minion {} failed".format(minion))
return False
return True
def _timeout(cluster='ceph'):
"""
Assume 15 minutes for physical hardware since some hardware has long
shutdown/reboot times. Assume 2 minutes for complete virtual environments.
"""
local = salt.client.LocalClient()
search = "I@cluster:{}".format(cluster)
virtual = local.cmd(search, 'grains.get', ['virtual'], expr_form="compound")
if 'physical' in virtual.values():
return 900
else:
return 120
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from os import environ, path, walk
from os.path import normpath, exists
from sys import argv
from xml.etree.ElementTree import parse
NAME_SPACES = {'ns': 'http://maven.apache.org/SETTINGS/1.0.0'}
def text(e, n):
return e.findtext(f'ns:{n}', namespaces=NAME_SPACES)
def findall(e, n):
return e.findall(f'.//ns:{n}', namespaces=NAME_SPACES)
def get_settings_info(settings_path):
e = parse(normpath(settings_path))
local_repo = text(e, 'localRepository')
if '${user.home}' in local_repo:
local_repo = local_repo.replace('${user.home}', environ['HOME'])
remotes = {text(r, 'id'): text(r, 'url')
for r in findall(e, 'repository')}
plugin_remotes = {text(r, 'id'): text(r, 'url')
for r in findall(e, 'pluginRepository')}
mirrors = {text(r, 'id'): text(r, 'url')
for r in findall(e, 'mirror')}
return local_repo, {**remotes, **plugin_remotes, **mirrors}
def excluded(f):
if f in ['_remote.repositories',
'maven-metadata-local.xml',
'maven-metadata-ossrh.xml',
'resolver-status.properties']:
return True
if f.endswith('asc'):
return True
if f.endswith('lastUpdated'):
return True
return False
def get_file_repos_info(local_repo, files):
for f in files:
root, file = f.rsplit('/', 1)
remote_repositories = path.join(root, '_remote.repositories')
if exists(remote_repositories):
yield (remote_repositories,
root[len(local_repo):],
(file,))
def get_repos_info(local_repo):
for root, dirs, files in walk(local_repo):
if files and '_remote.repositories' in files:
yield (path.join(root, '_remote.repositories'),
root[len(local_repo):],
[f for f in files
if not excluded(f)])
def parse_remote_repos(remote_repositories, remotes):
with open(remote_repositories, 'rt', encoding='utf-8') as f:
lines = [l for l in f.read().splitlines() if not l.startswith('#')]
return {k: remotes.get(v.replace('=', ''), None) for k, v in
[l.split('>') for l in lines]}
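# A minimal, hypothetical illustration of the "_remote.repositories" format that
# parse_remote_repos() consumes: every non-comment line reads "<artifact>><repo-id>=",
# and the repo id is resolved to a URL through the remotes mapping built from
# settings.xml. The artifact name and repository below are made up for the example.
def _example_parse_remote_repos():
    import os
    import tempfile
    remotes = {'central': 'https://repo.maven.apache.org/maven2'}
    with tempfile.NamedTemporaryFile('w+t', suffix='.repositories', delete=False) as f:
        f.write('#NOTE: This is a Maven Resolver internal implementation file.\n')
        f.write('example-artifact-1.0.jar>central=\n')
        tmp_name = f.name
    try:
        # -> {'example-artifact-1.0.jar': 'https://repo.maven.apache.org/maven2'}
        return parse_remote_repos(tmp_name, remotes)
    finally:
        os.unlink(tmp_name)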
def create_commands(remote_dict, artifact_path, artifacts):
def create_artifact_command(artifact):
extension = artifact.rsplit('.', 1)[1]
remote_dict_key = artifact[:-(len(extension) + 1)] if extension in [
'sha1', 'md5', 'sha256'] else artifact
remote_url = remote_dict.get(remote_dict_key, None)
if remote_url:
if remote_url.endswith('/'):
remote_url = remote_url[:-1]
rel_path = f'{artifact_path}/{artifact}'
return f'curl -o "{rel_path}" {remote_url}/{rel_path}'
else:
return None
mkdir_commands = [f'mkdir -p "{artifact_path}"']
download_commands = [create_artifact_command(artifact) for artifact in
artifacts]
return mkdir_commands + download_commands if any(download_commands) else []
def create_urls(remote_dict, artifact_path, artifacts):
def create_artifact_urls(artifact):
extension = artifact.rsplit('.', 1)[1]
remote_dict_key = artifact[:-(len(extension) + 1)] if extension in [
'sha1', 'md5', 'sha256'] else artifact
remote_url = remote_dict.get(remote_dict_key, None)
if remote_url:
if remote_url.endswith('/'):
remote_url = remote_url[:-1]
rel_path = f'{artifact_path}/{artifact}'
return f'{remote_url}/{rel_path}'
else:
return f'No url found for: {artifact_path}/{artifact}'
urls = [create_artifact_urls(artifact) for artifact in artifacts]
return urls if any(urls) else []
def main(settings_file, urls_only, skipped, files=None):
skipped_artifacts = []
local_repo, remotes = get_settings_info(settings_file)
repos_info = get_file_repos_info(local_repo, files) if files else get_repos_info(local_repo)
for remote_repos, artifact_path, artifacts in repos_info:
remote_dict = parse_remote_repos(remote_repos, remotes)
params = [remote_dict, artifact_path, artifacts]
outputs = create_urls(*params) if urls_only else create_commands(
*params)
if outputs:
for c in outputs:
print(c)
else:
skipped_artifacts.append(artifact_path)
if skipped:
for s in skipped_artifacts:
print('# Skipped:', s)
def parse_args(args):
parser = ArgumentParser(
description='Print download commands (POSIX) to enable making '
                    'a remote repo based on the content of a local one')
parser.add_argument(
'settings_file', help='Settings file')
parser.add_argument(
'-u', '--urls_only', help='Print only urls', action='store_true')
parser.add_argument(
'-s', '--skipped', help='Print skipped artifacts', action='store_true')
parser.add_argument(
'-f', '--files', nargs='*', help='Only for specified files and not all in local_repo')
return parser.parse_args(args)
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
|
#!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import mimetypes
import os
import re
import rfc822
import StringIO
import base64
import boto.utils
from boto.exception import BotoClientError
from boto.provider import Provider
from boto.s3.user import User
from boto import UserAgent
try:
from hashlib import md5
except ImportError:
from md5 import md5
from boto.s3.key import *
class AsyncKey(Key):
def __init__(self, bucket=None, name=None):
Key.__init__(self, bucket=bucket, name=name)
def __repr__(self):
if self.bucket:
return '<AsyncKey: %s,%s>' % (self.bucket.name, self.name)
else:
return '<AsyncKey: None,%s>' % self.name
def open_read(self, headers=None, query_args=None,
override_num_retries=None, response_headers=None, callback=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string (ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
if self.resp == None:
self.mode = 'r'
provider = self.bucket.connection.provider
def opened_read(response):
self.resp = response
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name,value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() == 'etag':
self.etag = value
elif name.lower() == 'content-type':
self.content_type = value
elif name.lower() == 'content-encoding':
self.content_encoding = value
elif name.lower() == 'last-modified':
self.last_modified = value
elif name.lower() == 'cache-control':
self.cache_control = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
if callable(callback):
callback(response)
self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries, callback=opened_read)
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None, callback=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries, callback=callback)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries, callback=callback)
else:
raise BotoClientError('Invalid mode: %s' % mode)
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
raise BotoClientError('Not Implemented')
def read(self, size=0, callback=None):
def _read(response):
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
if callable(callback):
callback(data)
self.open_read(callback=_read)
def exists(self, callback=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
def existence_tested(response):
if callable(callback):
callback(bool(response))
self.bucket.lookup(self.name, callback=existence_tested)
def delete(self, callback=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id, callback=callback)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, callback=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity
of the callback by defining the maximum number of
times the callback will be called during the file
transfer. Providing a negative integer will cause
your callback to be called with each buffer read.
"""
provider = self.bucket.connection.provider
def sender(http_conn, method, path, data, headers, sendback=None):
http_conn.putrequest(method, path)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
if chunked_transfer:
# MD5 for the stream has to be calculated on the fly, as
# we don't know the size of the stream before hand.
m = md5()
else:
fp.seek(0)
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 3 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 3:
http_conn.set_debuglevel(0)
if cb:
if chunked_transfer:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred.
cb_count = (1024 * 1024)/self.BufferSize
self.size = 0
elif num_cb > 2:
cb_count = self.size / self.BufferSize / (num_cb-2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = total_bytes = 0
cb(total_bytes, self.size)
l = fp.read(self.BufferSize)
while len(l) > 0:
if chunked_transfer:
http_conn.send('%x;\r\n' % len(l))
http_conn.send(l)
http_conn.send('\r\n')
else:
http_conn.send(l)
if cb:
total_bytes += len(l)
i += 1
if i == cb_count or cb_count == -1:
cb(total_bytes, self.size)
i = 0
if chunked_transfer:
m.update(l)
l = fp.read(self.BufferSize)
if chunked_transfer:
http_conn.send('0\r\n')
http_conn.send('\r\n')
if cb:
self.size = total_bytes
# Get the md5 which is calculated on the fly.
self.md5 = m.hexdigest()
else:
fp.seek(0)
if cb:
cb(total_bytes, self.size)
def sender_sent(response):
body = response.read()
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
if ((response.status == 500 or response.status == 503 or
response.getheader('location')) and not chunked_transfer):
# we'll try again.
if callable(sendback):
sendback(response)
elif response.status >= 200 and response.status <= 299:
self.etag = response.getheader('etag')
if self.etag != '"%s"' % self.md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5')
if callable(sendback):
sendback(response)
else:
raise provider.storage_response_error(
response.status, response.reason, body)
http_conn.getresponse(callback=sender_sent)
if not headers:
headers = {}
else:
headers = headers.copy()
headers['User-Agent'] = UserAgent
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
if headers.has_key('Content-Encoding'):
self.content_encoding = headers['Content-Encoding']
if headers.has_key('Content-Type'):
self.content_type = headers['Content-Type']
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type == None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if not chunked_transfer:
headers['Content-Length'] = str(self.size)
# headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
def file_sent(resp):
self.handle_version_headers(resp, force=True)
if callable(callback):
callback(resp)
self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers,
sender=sender,
query_args=query_args, callback=file_sent)
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None, callback=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
        As a result, the Content-Length and Content-MD5 headers cannot be
        supplied.  For huge uploads this avoids the delay of computing the
        MD5 up front, at the cost of not being able to verify the integrity
        of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
                                   Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
# Set the Transfer Encoding for Streams.
headers['Transfer-Encoding'] = 'chunked'
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket != None:
if not replace:
def existence_tested(k):
if k:
if callable(callback):
callback(False)
else:
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, callback=callback)
self.bucket.lookup(self.name, callback=existence_tested)
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, callback=callback)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, callback=None):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method
will first check to see if an object exists in the
bucket with the same key. If it does, it won't
overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
                   total size of the object to be transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity
of the callback by defining the maximum number of
times the callback will be called during the
file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of
the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
                                   Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
"""
provider = self.bucket.connection.provider
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
        # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket != None:
if not md5:
md5 = self.compute_md5(fp)
else:
# even if md5 is provided, still need to set size of content
fp.seek(0, 2)
self.size = fp.tell()
fp.seek(0)
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name == None:
self.name = self.md5
if not replace:
def existence_tested(k):
if k:
if callable(callback):
callback(False)
else:
self.send_file(fp, headers, cb, num_cb, query_args, callback=callback)
self.bucket.lookup(self.name, callback=existence_tested)
return
self.send_file(fp, headers, cb, num_cb, query_args, callback=callback)
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
                                   encrypt_key=False, callback=None):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
                   total size of the object to be transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
                                   Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
"""
fp = open(filename, 'rb')
def _set_contents_from_filename(response):
fp.close()
if callable(callback):
callback(response)
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key, callback=_set_contents_from_filename)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False, callback=None):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
                   total size of the object to be transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
                                   Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
"""
if isinstance(s, unicode):
s = s.encode("utf-8")
fp = StringIO.StringIO(s)
def _set_contents_from_string(response):
fp.close()
if callable(callback):
callback(response)
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key, callback=_set_contents_from_string)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, callback=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: headers to send when retrieving the file
        :type cb: function
        :param cb: a callback function that will be called to report
                   progress on the download.  The callback should accept
                   two integer parameters, the first representing the
                   number of bytes that have been successfully
                   received from S3 and the second representing the
                   total size of the object being transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
if cb:
if num_cb > 2:
cb_count = self.size / self.BufferSize / (num_cb-2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = total_bytes = 0
cb(total_bytes, self.size)
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = []
if torrent:
query_args.append('torrent')
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (key, response_headers[key]))
query_args = '&'.join(query_args)
def file_got(response):
body = self.resp.read()
fp.write(body)
if cb:
cb(total_bytes, self.size)
self.close()
self.bucket.connection.debug = save_debug
if callable(callback):
callback(response)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries, callback=file_got)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None, callback=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File -like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
                   progress on the download.  The callback should accept
                   two integer parameters, the first representing the
                   number of bytes that have been successfully
                   received from S3 and the second representing the
                   total size of the object being transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
if self.bucket != None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id, callback=callback)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers, callback=callback)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None, callback=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
                   progress on the download.  The callback should accept
                   two integer parameters, the first representing the
                   number of bytes that have been successfully
                   received from S3 and the second representing the
                   total size of the object being transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
fp = open(filename, 'wb')
def got_contents_to_filename(response):
fp.close()
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified != None:
try:
modified_tuple = rfc822.parsedate_tz(self.last_modified)
modified_stamp = int(rfc822.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception: pass
if callable(callback):
callback(response)
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers, callback=got_contents_to_filename)
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, callback=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
                   progress on the download.  The callback should accept
                   two integer parameters, the first representing the
                   number of bytes that have been successfully
                   received from S3 and the second representing the
                   total size of the object being transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
        :rtype: None
        :returns: Nothing directly; the contents of the object are passed
                  to the callback as a string.
"""
fp = StringIO.StringIO()
def got_contents_as_string(response):
if callable(callback):
callback(fp.getvalue())
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers, callback=got_contents_as_string)
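# --- Hedged usage sketch (not part of the library) ---------------------------
# The Key methods above are callback-driven rather than blocking.  A minimal,
# illustrative round trip is sketched below; `bucket` is assumed to be the
# asynchronous Bucket object that accompanies this Key class, and `new_key`
# is assumed to mirror boto's synchronous API of the same name.
def _example_async_roundtrip(bucket, results):
    key = bucket.new_key('example-object')        # hypothetical key name
    def _downloaded(data):
        # collect the body we read back so the caller can inspect it
        results.append(data)
    def _uploaded(response):
        # the PUT has completed; fetch the object back, again via callback
        key.get_contents_as_string(callback=_downloaded)
    key.set_contents_from_string('hello world', callback=_uploaded)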
# vim:set ft=python sw=4 :
|
import sys
import dbus
import dbus.service
import dbus.mainloop.glib
from slip.dbus import polkit
class SELinuxDBus (object):
def __init__ (self):
self.bus = dbus.SystemBus()
self.dbus_object = self.bus.get_object("org.selinux", "/org/selinux/object")
@polkit.enable_proxy
def semanage (self, buf):
ret = self.dbus_object.semanage(buf, dbus_interface = "org.selinux")
return ret
@polkit.enable_proxy
def restorecon (self, path):
ret = self.dbus_object.restorecon(path, dbus_interface = "org.selinux")
return ret
@polkit.enable_proxy
def setenforce (self, value):
ret = self.dbus_object.setenforce(value, dbus_interface = "org.selinux")
return ret
@polkit.enable_proxy
def customized (self):
ret = self.dbus_object.customized(dbus_interface = "org.selinux")
return ret
@polkit.enable_proxy
def semodule_list (self):
ret = self.dbus_object.semodule_list(dbus_interface = "org.selinux")
return ret
@polkit.enable_proxy
def relabel_on_boot(self, value):
ret = self.dbus_object.relabel_on_boot(value, dbus_interface = "org.selinux")
return ret
@polkit.enable_proxy
def change_default_mode(self, value):
ret = self.dbus_object.change_default_mode(value, dbus_interface = "org.selinux")
return ret
@polkit.enable_proxy
def change_default_policy(self, value):
ret = self.dbus_object.change_default_policy(value, dbus_interface = "org.selinux")
return ret
if __name__ == "__main__":
try:
dbus_proxy = SELinuxDBus()
resp = dbus_proxy.setenforce(int(sys.argv[1]))
print (resp)
except dbus.DBusException, e:
print e
|
"""
Copyright (C) 2008, 2009 - Luke Kenneth Casson Leighton <[email protected]>
"""
from pyjamas import DOM
from pyjamas.ui import Event
class ClickHandler(object):
def __init__(self, preventDefault=False):
self._clickListeners = []
self._doubleclickListeners = []
self._clickPreventDefault = preventDefault
self.sinkEvents(Event.ONCLICK)
self.sinkEvents(Event.ONDBLCLICK)
def onClick(self, sender=None):
pass
def onDoubleClick(self, sender=None):
pass
def addDoubleClickListener(self, listener):
self._doubleclickListeners.append(listener)
def addClickListener(self, listener):
stylename = self.getStyleName()
if stylename:
self.addStyleName("%s-clickable" % stylename.split()[0])
self._clickListeners.append(listener)
def onBrowserEvent(self, event):
"""Listen to events raised by the browser and call the appropriate
method of the listener (widget, ..) object.
"""
type = DOM.eventGetType(event)
if type == "click":
if self._clickPreventDefault:
DOM.eventPreventDefault(event)
for listener in self._clickListeners:
if hasattr(listener, "onClick"):
listener.onClick(self)
else:
listener(self)
elif type == "dblclick":
if self._clickPreventDefault:
DOM.eventPreventDefault(event)
for listener in self._doubleclickListeners:
if hasattr(listener, "onDoubleClick"):
listener.onDoubleClick(self)
else:
listener(self)
def removeClickListener(self, listener):
self._clickListeners.remove(listener)
def removeDoubleClickListener(self, listener):
self._doubleclickListeners.remove(listener)
def clearClickListener(self):
self._clickListeners[:] = []
def clearDoubleClickListener(self):
        self._doubleclickListeners[:] = []
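# --- Hedged usage sketch ------------------------------------------------------
# Widgets that mix this class in expose addClickListener(); a listener may be
# either a plain callable or an object with an onClick() method, as the
# dispatch in onBrowserEvent() above shows.  Button is assumed to be such a
# widget (illustrative import path, not verified against a specific release).
def _example_click_listener():
    from pyjamas.ui.Button import Button   # assumed widget that uses this mixin
    def on_button_click(sender):
        # plain-callable listener: called as listener(widget)
        sender.setText("clicked")
    button = Button("press me")
    button.addClickListener(on_button_click)
    return button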
|
# vim:et:ts=4:sts=4:ai
"""
s3-backup
Copyright 2016 Philip J Freeman <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class LogicalVolumeSnapshot(object):
def __init__(self, volume, name, size, verbose=False):
import subprocess
self.volume = volume
self.name = name
self.mounted = False
self.verbose = verbose
lvcreate_cmd = ["lvcreate", "-L"+str(size)+"B", "-s", "-n",
self.name, self.volume.group.name + "/"
+ self.volume.name]
lvcreate_process = subprocess.Popen(lvcreate_cmd)
return_code = lvcreate_process.wait()
if return_code != 0:
raise Exception("Error: command failed")
self.device = "/dev/"+self.volume.group.name+"/"+self.name
def is_mounted(self):
return self.mounted
def ro_mount(self, mountpoint):
import subprocess
if self.is_mounted():
raise Exception("Snapshot already mounted.")
mount_cmd = ["mount", "-o", "ro", self.device, mountpoint]
mount_process = subprocess.Popen(mount_cmd)
return_code = mount_process.wait()
if return_code != 0:
raise Exception("Error: command failed")
elif self.verbose:
print "DEBUG: Successfully mounted", self.device, "on", \
mountpoint
self.mounted = True
def umount(self):
import subprocess
import time
#Avoid race conditions:
time.sleep(2)
if not self.is_mounted():
raise Exception("Snapshot not mounted.")
umount_cmd = ["umount", self.device]
umount_process = subprocess.Popen(umount_cmd)
return_code = umount_process.wait()
if return_code != 0:
raise Exception("Error: command failed")
elif self.verbose:
print "DEBUG: Successfully umounted", self.device
self.mounted = False
def remove(self):
import subprocess
if self.is_mounted():
raise Exception("Snapshot mounted.")
lvremove_cmd = ["lvremove", "-f", self.volume.group.name + "/" + \
self.name]
lvremove_process = subprocess.Popen(lvremove_cmd)
return_code = lvremove_process.wait()
if return_code != 0:
raise Exception("Error: command failed")
elif self.verbose:
print "DEBUG: Successfully removed", self.name
class LogicalVolume(object):
def __init__(self, group, volume_name, verbose=False):
import re
self.re_number = re.compile(r'^\s*(\d+)')
self.group = group
self.name = volume_name
self.verbose = verbose
def get_size(self):
import subprocess
size_cmd = ["lvs", "--noheadings", "--units", "B",
self.group.name + "/" + self.name, "-o", "lv_size"]
size_process = subprocess.Popen(size_cmd, stdout=subprocess.PIPE)
return_code = size_process.wait()
if return_code != 0:
raise Exception("Error: command failed")
output = size_process.stdout.read()
m_number = self.re_number.match(output)
if m_number == None:
raise Exception("Error: parsing command output: "+output)
size = int(m_number.group(1))
if self.verbose:
print "DEBUG: got LogicalVolume size:", size
return size
def make_snapshot(self, allocation_pct=100):
import datetime
lv_size = self.get_size()
snap_allocation = (
(int(
lv_size * float(allocation_pct/100.0) # Calculate percentage
            ) / 512) * 512 # Use a 512B boundary
+ (1024*1024*128) # add 128MB for overhead
)
if self.verbose:
print "DEBUG: "+str(allocation_pct)+"% of "+str(lv_size)+ \
"B = "+str(snap_allocation)+"B"
snap_name = self.name+".snapshot." + \
datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
if self.verbose:
print "DEBUG: generating snapshot", snap_name, "in", self.group.name
return LogicalVolumeSnapshot(self, snap_name, snap_allocation,
verbose=self.verbose)
class VolumeGroup(object):
def __init__(self, group_name, verbose=False):
self.name = group_name
self.verbose = verbose
# TODO: Validation
def get_volume(self, volume_name):
return LogicalVolume(self, volume_name, verbose=self.verbose)
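# --- Hedged usage sketch ------------------------------------------------------
# Illustrative lifecycle for the classes above.  The volume group, logical
# volume and mount point names are placeholders, and running this for real
# requires the LVM tools plus root privileges.
def _example_backup_snapshot():
    group = VolumeGroup("vg0", verbose=True)          # hypothetical VG name
    volume = group.get_volume("home")                 # hypothetical LV name
    snapshot = volume.make_snapshot(allocation_pct=20)
    try:
        snapshot.ro_mount("/mnt/snapshot")            # read-only mount point
        # ... copy files out of /mnt/snapshot here ...
    finally:
        if snapshot.is_mounted():
            snapshot.umount()
        snapshot.remove()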
|
import dateutil.parser
from corehq.apps.reports.dispatcher import CustomProjectReportDispatcher
from corehq.apps.reports.generic import ElasticProjectInspectionReport
from corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin
from dimagi.utils.parsing import ISO_DATE_FORMAT
class PactPatientDispatcher(CustomProjectReportDispatcher):
prefix = 'pactpatient'
def dispatch(self, request, *args, **kwargs):
ret = super(PactPatientDispatcher, self).dispatch(request, *args, **kwargs)
return ret
def get_reports(self, domain):
return self.report_map.get(domain, {})
class PactElasticTabularReportMixin(CustomProjectReport, ElasticProjectInspectionReport, ProjectReportParametersMixin):
def format_date(self, date_string, format=ISO_DATE_FORMAT):
try:
date_obj = dateutil.parser.parse(date_string)
return date_obj.strftime(format)
        except Exception:  # fall back to the raw string if parsing fails
return date_string
class PactDrilldownReportMixin(object):
# this is everything that's shared amongst the Pact reports
# this class is an amalgamation of random behavior and is just
# for convenience
report_template_path = ""
hide_filters = True
filters = []
flush_layout = True
# mobile_enabled = True
fields = []
es_results=None
@property
def render_next(self):
return None if self.rendered_as == "async" else self.rendered_as
@classmethod
def show_in_navigation(cls, *args, **kwargs):
return False
from pact.reports import patient_list, dot, patient, chw_list, chw, admin_dot_reports, admin_chw_reports
CUSTOM_REPORTS = (
("PACT Reports", (
patient_list.PatientListDashboardReport,
dot.PactDOTReport,
patient.PactPatientInfoReport,
chw_list.PactCHWDashboard,
chw.PactCHWProfileReport,
admin_dot_reports.PactDOTAdminReport,
admin_chw_reports.PactCHWAdminReport,
)),
)
|
import os
import six
from aleph.util import checksum
class Archive(object):
def _get_file_path(self, meta):
ch = meta.content_hash
if ch is None:
raise ValueError("No content hash available.")
path = os.path.join(ch[:2], ch[2:4], ch[4:6], ch)
file_name = 'data'
if meta.file_name is not None:
file_name = meta.file_name
else:
if meta.extension is not None:
file_name = '%s.%s' % (file_name, meta.extension)
return os.path.join(six.text_type(path), six.text_type(file_name))
def _update_metadata(self, filename, meta):
meta.content_hash = checksum(filename)
return meta
def upgrade(self):
"""Run maintenance on the store."""
pass
def archive_file(self, filename, meta, move=False):
"""Import the given file into the archive.
Return an updated metadata object. If ``move`` is given, the
original file will not exist afterwards.
"""
pass
def load_file(self, meta):
pass
def cleanup_file(self, meta):
pass
def generate_url(self, meta):
return
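# --- Hedged sketch of a concrete backend --------------------------------------
# The base class above only defines the interface.  A minimal local-filesystem
# backend might look like this; the class name, directory layout and use of
# shutil are illustrative assumptions, not the project's real storage code.
import shutil

class ExampleFileArchive(Archive):
    def __init__(self, base_path):
        self.base_path = base_path

    def archive_file(self, filename, meta, move=False):
        meta = self._update_metadata(filename, meta)
        dest = os.path.join(self.base_path, self._get_file_path(meta))
        if not os.path.isdir(os.path.dirname(dest)):
            os.makedirs(os.path.dirname(dest))
        if move:
            shutil.move(filename, dest)
        else:
            shutil.copyfile(filename, dest)
        return meta

    def load_file(self, meta):
        # hand back a local path to the stored copy
        return os.path.join(self.base_path, self._get_file_path(meta))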
|
'''The veterinary app contains models for MedicalCondition, MedicalIssue and MedicalTreatment.'''
from django.db import models
from django.template.defaultfilters import slugify
from mousedb.animal.models import Animal
class MedicalIssue(models.Model):
'''This model contains details about a particular medical issue.
    There are links to the :class:`~mousedb.animal.models.Animal`, the :class:`~mousedb.veterinary.models.MedicalCondition` and the :class:`~mousedb.veterinary.models.MedicalTreatment` choice.
The required fields are the animal and the condition.
There are also fields for the diagnosis date, veterinary code, treatment start and treatment end dates (not required).'''
animal = models.ForeignKey(Animal)
condition = models.ForeignKey('MedicalCondition', help_text = "The medical problem")
treatment = models.ForeignKey('MedicalTreatment', blank=True, null=True, help_text = "The course of treatment")
diagnosis = models.DateField(blank=True, null=True, help_text = "When this problem was noticed")
code = models.CharField(max_length=25,blank=True, null=True, help_text = "Veterinary Code")
treatment_start = models.DateField(blank=True, null=True, help_text = "When treatment began")
treatment_end = models.DateField(blank=True, null=True, help_text = "When treatment ceased")
def __unicode__(self):
'''The unicode representation is the animal field combined with the condition field.'''
return u'%s - %s' % (self.animal, self.condition)
@models.permalink
def get_absolute_url(self):
'''The url of a medical issue is **/veterinary/medical-issue/<id#>/**.'''
return ('medical-issue-detail', [str(self.id)])
class MedicalCondition(models.Model):
'''This model contains details about different types of medical conditions.
The only required field is the name.
    There is an auto-generated slug field as well as created and updated fields.
    The slug field is set on the first save only and is not changed on later saves, so generated URLs stay stable.
There is also an optional notes field for extra information.'''
name = models.CharField(max_length = 100, unique=True)
slug = models.SlugField(max_length = 100, editable=False)
notes = models.TextField(blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
'''The unicode representation is the name field.'''
return u'%s' % self.name
@models.permalink
def get_absolute_url(self):
        '''The url of a medical condition is **/veterinary/medical-condition/<slug>/**.'''
return ('medical-condition-detail', [str(self.slug)])
def save(self, *args, **kwargs):
'''The save method is over-ridden to generate and save the slug field. This is only done with create, not update.'''
if not self.id:
self.slug = slugify(self.name)
super(MedicalCondition, self).save(*args, **kwargs)
class MedicalTreatment(models.Model):
'''This model defines details about a medical treatment.
There is one required field (name), the treatment name and one auto-generated field (slug).'''
name = models.CharField(max_length = 100, unique=True)
slug = models.SlugField(max_length = 100, editable=False)
def __unicode__(self):
'''The unicode representation is the name field.'''
return u'%s' % self.name
@models.permalink
def get_absolute_url(self):
        '''The url of a medical treatment is **/veterinary/medical-treatment/<slug>/**.'''
return ('medical-treatment-detail', [str(self.slug)])
def save(self, *args, **kwargs):
'''The save method is over-ridden to generate and save the slug field. This is only done with create, not update.'''
if not self.id:
self.slug = slugify(self.name)
super(MedicalTreatment, self).save(*args, **kwargs)
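# --- Hedged usage sketch ------------------------------------------------------
# How the three models above fit together.  The animal argument and field
# values are illustrative; this helper is not part of the application code.
def _example_record_issue(animal):
    condition, _ = MedicalCondition.objects.get_or_create(name='Dermatitis')
    treatment, _ = MedicalTreatment.objects.get_or_create(name='Topical antibiotic')
    return MedicalIssue.objects.create(animal=animal, condition=condition,
                                       treatment=treatment)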
|
from datetime import datetime
from datetime import timedelta
from flask import Flask
from flask import redirect
from flask import render_template
from flask import url_for
from pocket_auth import authorize_user
from pocket_auth import get_authentication_url
from pocket_retrieve import get_read_articles_from_datetime
_APP = Flask(__name__)
_DEFAULT_LATEST_DAYS = 7
@_APP.route("/")
def authentication():
response = render_template(
'index.html',
authentication_url=get_authentication_url(),
)
return response
@_APP.route("/proxy")
def authentification_proxy():
access_token = authorize_user()
return redirect(
url_for('view_read_articles', access_token=access_token),
)
@_APP.route("/<string:access_token>/articles")
@_APP.route("/<string:access_token>/articles/days/<int:days>")
def view_read_articles(access_token, days=_DEFAULT_LATEST_DAYS):
date_time = datetime.today() - timedelta(days=days)
read_articles = get_read_articles_from_datetime(access_token, date_time)
return render_template(
'read_articles.html',
read_articles=read_articles,
days_since=days,
)
if __name__ == "__main__":
_APP.run(host='0.0.0.0')
|
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup,\
DihedralGroup, AlternatingGroup, AbelianGroup
def test_SymmetricGroup():
G = SymmetricGroup(5)
elements = list(G.generate())
assert (G.generators[0]).size == 5
assert len(elements) == 120
assert G.is_solvable == False
assert G.is_abelian == False
assert G.is_transitive == True
H = SymmetricGroup(1)
assert H.order() == 1
L = SymmetricGroup(2)
assert L.order() == 2
def test_CyclicGroup():
G = CyclicGroup(10)
elements = list(G.generate())
assert len(elements) == 10
assert (G.derived_subgroup()).order() == 1
assert G.is_abelian == True
H = CyclicGroup(1)
assert H.order() == 1
L = CyclicGroup(2)
assert L.order() == 2
def test_DihedralGroup():
G = DihedralGroup(6)
elements = list(G.generate())
assert len(elements) == 12
assert G.is_transitive == True
assert G.is_abelian == False
H = DihedralGroup(1)
assert H.order() == 2
L = DihedralGroup(2)
assert L.order() == 4
assert L.is_abelian == True
def test_AlternatingGroup():
G = AlternatingGroup(5)
elements = list(G.generate())
assert len(elements) == 60
assert [perm.is_even for perm in elements] == [True]*60
H = AlternatingGroup(1)
assert H.order() == 1
L = AlternatingGroup(2)
assert L.order() == 1
def test_AbelianGroup():
A = AbelianGroup(3, 3, 3)
assert A.order() == 27
assert A.is_abelian == True
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning.
See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedClassifier
@@LinearClassifier
@@LinearRegressor
@@LogisticRegressor
@@SVM
@@SKCompat
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
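# --- Hedged usage sketch ------------------------------------------------------
# A minimal, illustrative use of one of the estimators re-exported above,
# following the classic contrib.learn quickstart.  The data shapes, layer
# sizes and step count are made up, and the exact fit() signature depends on
# the TensorFlow release, so treat this purely as a sketch.
def _example_dnn_classifier(train_x, train_y):
    feature_columns = infer_real_valued_columns_from_input(train_x)
    classifier = DNNClassifier(hidden_units=[10, 10], n_classes=3,
                               feature_columns=feature_columns)
    classifier.fit(x=train_x, y=train_y, steps=200)
    return classifier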
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from server.BaseNode import expose
from server.RF2BaseNode import RF2BaseNode, global_iter_parms, validate
from rf2db.utils.sctid import sctid
from rf2db.db.RF2ConceptFile import ConceptDB, concept_parms, concept_list_parms, new_concept_parms, \
update_concept_parms, delete_concept_parms
from server.config.Rf2Entries import settings
concdb = ConceptDB()
class Concept(RF2BaseNode):
title = "Read RF2 concept by concept id"
label = "Concept SCTID"
value = settings.refConcept
@expose
@validate(concept_parms)
def default(self, parms, **kwargs):
dbrec = concdb.read(int(sctid(parms.concept)), **parms.dict)
return dbrec, (404, "Concept %s not found" % parms.concept)
@expose("POST")
@validate(new_concept_parms)
def new(self, parms, **kwargs):
# A POST cannot supply a concept id
kwargs.pop('concept', None)
dbrec = concdb.add(**parms.dict)
if isinstance(dbrec, str):
return None, (400, dbrec)
elif not dbrec:
return None, (500, "Unable to create concept record")
self.redirect('/concept/%s' % dbrec.id)
@expose(methods="PUT")
@validate(update_concept_parms)
def update(self, parms, concept, **_):
return concdb.update(concept, **parms.dict)
@expose(methods=["DELETE"])
@validate(delete_concept_parms)
def delete(self, parms, concept, **_):
return concdb.delete(concept, **parms.dict)
class Concepts(RF2BaseNode):
title = "List concepts starting after"
label = "Concept SCTID"
value = 0
extensions = RF2BaseNode.extensions + [global_iter_parms]
@expose
@validate(concept_list_parms)
def default(self, parms, **_):
return concdb.as_list(concdb.getAllConcepts(**parms.dict), parms)
|
import os
from six.moves import configparser, StringIO
import testtools
from jenkins_jobs import cmd
from tests.base import mock
class CmdTestsBase(testtools.TestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
parser = cmd.create_parser()
def setUp(self):
super(CmdTestsBase, self).setUp()
# Testing the cmd module can sometimes result in the CacheStorage class
# attempting to create the cache directory multiple times as the tests
# are run in parallel. Stub out the CacheStorage to ensure that each
# test can safely create the cache directory without risk of
# interference.
self.cache_patch = mock.patch('jenkins_jobs.builder.CacheStorage',
autospec=True)
self.cache_patch.start()
self.config = configparser.ConfigParser()
self.config.readfp(StringIO(cmd.DEFAULT_CONF))
def tearDown(self):
self.cache_patch.stop()
super(CmdTestsBase, self).tearDown()
class CmdTests(CmdTestsBase):
def test_with_empty_args(self):
"""
User passes no args, should fail with SystemExit
"""
with mock.patch('sys.stderr'):
self.assertRaises(SystemExit, cmd.main, [])
|
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
class ActionsHelper:
def __init__(self,app):
self.app = app
def dropdown_select(self, field_name, value):
print(''' drop-down <{1}> | value <{0}>'''.format(value, field_name))
select = Select(self.app.wd.find_element_by_name(field_name))
start = time.time()
        # retry until the option becomes selectable, but give up after 5 seconds
        while True:
            try:
                select.select_by_visible_text(value)
                break
            except Exception:
                # the option list may still be loading; wait and retry
                assert time.time() - start < 5
                time.sleep(1)
def file_select(self, field_name, value):
print("\tfile upload <{1}> |\tvalue <{0}>".format(value, field_name))
self.app.wd.find_element_by_name(field_name).send_keys(value)
def text_input(self, field_name, value):
print(''' text input <{1}> | value <{0}>'''.format(value, field_name))
self.app.wd.find_element_by_name(field_name).click()
self.app.wd.find_element_by_name(field_name).clear()
self.app.wd.find_element_by_name(field_name).send_keys(value)
def button_click(self, button_name):
self.app.wd.find_element_by_name(button_name).click()
def input_click(self, input_value):
self.app.wd.find_element_by_xpath("//input[@value='{0}']".format(input_value)).click()
def link_click(self, link_text):
self.app.wd.find_element_by_link_text(link_text).click()
def menu_item_click(self, menu_name):
print("Menu item name:{0} click".format(menu_name))
self.app.wd.find_element_by_name(menu_name).click()
def submit(self):
self.button_click("submit")
def update(self):
self.button_click("update")
def wait_button_clickable(self, button_name):
wait = WebDriverWait(self.app.wd, 10)
wait.until(EC.element_to_be_clickable((By.XPATH, "//input[@value='{0}']".format(button_name))))
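# --- Hedged usage sketch ------------------------------------------------------
# `app` is assumed to be the fixture object that owns the WebDriver instance
# (referenced as self.app.wd above); the field and button names are
# illustrative only.
def _example_fill_form(app):
    actions = ActionsHelper(app)
    actions.text_input("firstname", "John")
    actions.dropdown_select("bday", "10")
    actions.file_select("photo", "/tmp/avatar.png")
    actions.submit()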
|
from xml.etree import ElementTree as ET
import os, csv, json
from burney_data import BurneyDB
import sys
STORE = "articleleveljson"
class CorruptXML(Exception):
pass
def store_paper(newspaper, year, month, day, obj):
os.makedirs(os.path.join(STORE, newspaper, year), exist_ok = True)
with open(os.path.join(STORE, newspaper, year, "{0}_{1}.json".format(month, day)), "w") as ofp:
json.dump(obj, ofp)
def get_page_text(filename):
assert(os.path.isfile(filename))
text = []
with open(filename, "r") as fl:
try:
r = fl.read()
doc = ET.fromstring(r)
except Exception as e:
raise CorruptXML
title = ""
# try to get the article title...?
title_ele = doc.find("BL_article/article_metadata/dc_metadata/{http://purl.org/dc/elements/1.1/}Title")
if title_ele != None:
title = title_ele.text
for words in doc.findall("BL_article/image_metadata/articleImage/articleText/articleWord"):
text.append(words.text)
return title, " ".join([x for x in text if x])
def get_article_list(path):
return [x for x in os.listdir(path) if x.endswith("xml") and len(x.split("-")) == 3]
if __name__ == "__main__":
BURNEY_DB = "burney.db.fullscan"
db = BurneyDB(BURNEY_DB)
with db:
newspapers = [x['titleAbbreviation'] for x in db.list_all_newspapers()]
for newspaper in newspapers:
title_md = db.get_title_row(titleAbbreviation = newspaper)
for row in db.list_all_entries(title_id = title_md['id']):
ppath = row['filepath']
year = row['year']
month = row['month']
day = row['day']
if not os.path.isfile(os.path.join(STORE, newspaper, year, "{0}_{1}.json".format(month, day))):
print("Rendering out text for {0} - {3}/{2}/{1}".format(newspaper, year, month, day))
arts = get_article_list(ppath)
doc = {}
for art in arts:
_,pg, artno = art.split("-")
try:
doc["{0}_{1}".format(pg, artno[:-4])] = get_page_text(os.path.join(ppath, art))
except CorruptXML as e:
# failed to parse the article XML
db.mark_corrupt({'newspaper': newspaper,
'filepath': ppath,
'day': day,
'month': month,
'year': year,
'xmlfile': os.path.join(ppath, art)})
store_paper(newspaper, year, month, day, doc)
else:
print("Done {0} - {3}/{2}/{1} already".format(newspaper, year, month, day))
|
"""
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import jsunpack
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class VidUpResolver(UrlResolver):
name = "vidup"
domains = ["vidup.org", "vidup.me"]
pattern = '(?://|\.)(vidup.(?:me|org))/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
best_stream_url = ''
max_quality = 0
for match in re.finditer('(eval\(function.*?)</script>', html, re.DOTALL):
js_data = jsunpack.unpack(match.group(1))
js_data = js_data.replace("\\'", "'")
r = re.findall(r"label\s*:\s*'([^']+)p'\s*,\s*file\s*:\s*'([^']+)", js_data)
if r:
for quality, stream_url in r:
if int(quality) >= max_quality:
best_stream_url = stream_url
max_quality = int(quality)
if best_stream_url:
return best_stream_url
raise ResolverError('File Not Found or removed')
def get_url(self, host, media_id):
return 'http://vidup.me/embed-%s.html' % media_id
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
return re.search(self.pattern, url) or self.name in host
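# --- Hedged usage sketch ------------------------------------------------------
# Resolvers in this framework are normally driven by urlresolver itself; a
# direct, illustrative call sequence looks like this (the URL is made up).
def _example_resolve():
    resolver = VidUpResolver()
    url = 'http://vidup.me/embed-abc123.html'      # hypothetical page URL
    parts = resolver.get_host_and_id(url)
    if parts and resolver.valid_url(url, parts[0]):
        return resolver.get_media_url(*parts)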
|
import collections
def get_charcode(text):
return int(text[2:], 16)
def nnprint(text):
print(text, end='')
d = collections.defaultdict(set)
# Create a bidirectional graph
for line in open('Unihan_Variants.txt'):
if line[0] == '#' or line.strip() == '': continue
from_codepoint, variant_style, tos = line.split(maxsplit=2)
if variant_style in ['kTraditionalVariant', 'kSimplifiedVariant', 'kZVariant']:
for to in tos.split():
to_codepoint = to.split('<')[0].strip()
from_charcode = get_charcode(from_codepoint)
to_charcode = get_charcode(to_codepoint)
d[from_charcode].add(to_charcode)
d[to_charcode].add(from_charcode)
# Find connected subgraphs using BFSs
visited = set()
for codepoint in d.keys():
if codepoint in visited:
continue
nodes = {codepoint}
queue = [codepoint]
while len(queue) > 0:
current = queue.pop()
visited.add(current)
nodes.add(current)
for adjacent in d[current]:
if adjacent not in nodes:
queue.insert(0, adjacent)
for node in nodes:
d[node] = nodes - {node}
entries = []
for from_codepoint in sorted(d.keys()):
character_string = str(from_codepoint) + ':['
character_string += ','.join(map(str, d[from_codepoint])) + ']'
entries.append(character_string)
js = 'var variantsMap = {' + ','.join(entries) + '};'
open('www/js/variants.js', 'w').write(js)
print(js)
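# --- Hedged sanity-check sketch ------------------------------------------------
# After the BFS above, every codepoint in a connected component is mapped to
# all other members of that component, so a variant lookup is a dict access.
# The helper below is illustrative and not used by the generation step.
def variants_of(char):
    return {chr(cp) for cp in d.get(ord(char), set())}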
|
# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
from oslo_config import cfg
import random
import testtools
from mistral.db.sqlalchemy import sqlite_lock
from mistral.db.v2.sqlalchemy import api as db_api
from mistral.db.v2.sqlalchemy import models as db_models
from mistral.tests import base as test_base
WF_EXEC = {
'name': '1',
'spec': {},
'start_params': {},
'state': 'RUNNING',
'state_info': "Running...",
'created_at': None,
'updated_at': None,
'context': None,
'task_id': None,
'trust_id': None
}
@testtools.skipIf(
'sqlite' not in cfg.CONF.database.connection,
'Not using SQLite for DB backend.')
class SQLiteLocksTest(test_base.DbTestCase):
def setUp(self):
super(SQLiteLocksTest, self).setUp()
cfg.CONF.set_default('auth_enable', True, group='pecan')
self.addCleanup(
cfg.CONF.set_default,
'auth_enable',
False,
group='pecan'
)
def _random_sleep(self):
eventlet.sleep(random.Random().randint(0, 10) * 0.001)
def _run_acquire_release_sqlite_lock(self, obj_id, session):
self._random_sleep()
sqlite_lock.acquire_lock(obj_id, session)
self._random_sleep()
sqlite_lock.release_locks(session)
def test_acquire_release_sqlite_lock(self):
threads = []
id = "object_id"
number = 500
for i in range(1, number):
threads.append(
eventlet.spawn(self._run_acquire_release_sqlite_lock, id, i)
)
[t.wait() for t in threads]
[t.kill() for t in threads]
self.assertEqual(1, len(sqlite_lock.get_locks()))
sqlite_lock.cleanup()
self.assertEqual(0, len(sqlite_lock.get_locks()))
def _run_correct_locking(self, wf_ex):
self._random_sleep()
with db_api.transaction():
# Here we lock the object before it gets loaded into the
# session and prevent reading the same object state by
# multiple transactions. Hence the rest of the transaction
# body works atomically (in a serialized manner) and the
# result (object name) must be equal to a number of
# transactions.
db_api.acquire_lock(db_models.WorkflowExecution, wf_ex.id)
# Refresh the object.
wf_ex = db_api.get_workflow_execution(wf_ex.id)
wf_ex.name = str(int(wf_ex.name) + 1)
return wf_ex.name
def test_correct_locking(self):
wf_ex = db_api.create_workflow_execution(WF_EXEC)
threads = []
number = 500
for i in range(1, number):
threads.append(
eventlet.spawn(self._run_correct_locking, wf_ex)
)
[t.wait() for t in threads]
[t.kill() for t in threads]
wf_ex = db_api.get_workflow_execution(wf_ex.id)
print("Correct locking test gave object name: %s" % wf_ex.name)
self.assertEqual(str(number), wf_ex.name)
def _run_invalid_locking(self, wf_ex):
self._random_sleep()
with db_api.transaction():
# Load object into the session (transaction).
wf_ex = db_api.get_workflow_execution(wf_ex.id)
# It's too late to lock the object here because it's already
# been loaded into the session so there should be multiple
# threads that read the same object state so they write the
# same value into DB. As a result we won't get a result
# (object name) equal to a number of transactions.
db_api.acquire_lock(db_models.WorkflowExecution, wf_ex.id)
wf_ex.name = str(int(wf_ex.name) + 1)
return wf_ex.name
def test_invalid_locking(self):
wf_ex = db_api.create_workflow_execution(WF_EXEC)
threads = []
number = 500
for i in range(1, number):
threads.append(
eventlet.spawn(self._run_invalid_locking, wf_ex)
)
[t.wait() for t in threads]
[t.kill() for t in threads]
wf_ex = db_api.get_workflow_execution(wf_ex.id)
print("Invalid locking test gave object name: %s" % wf_ex.name)
self.assertNotEqual(str(number), wf_ex.name)
|
from bottle import abort, get, post, redirect, request, run, view
from models import User, Post
from session import create_session, user_required
# [START index]
@get("/")
@view("index")
@user_required()
def index(user):
cursor, tag = request.params.cursor, request.params.tag
posts = Post.query().order_by(-Post.created_at)
if tag:
posts = posts.where(Post.tags == tag)
pages = posts.paginate(page_size=2, cursor=cursor)
return {"user": user, "pages": pages}
# [END index]
# [START post]
@get("/posts/<slug>")
@view("post")
@user_required("read")
def view_post(user, slug):
post = Post.query().where(Post.slug == slug).get()
if post is None:
return abort(404)
return {"post": post, "embedded": False}
# [END post]
# [START create]
@get("/new")
@view("create")
@user_required("create")
def create(user):
return {}
@post("/new")
@view("create")
@user_required("create")
def do_create(user):
title = request.forms.title
tags = [tag.strip() for tag in request.forms.tags.split(",") if tag.strip()]
body = request.forms.body
post = Post(author=user, title=title, tags=tags, body=body).put()
return redirect("/posts/" + post.slug)
# [END create]
# [START login]
@get("/login")
@view("login")
def login():
return {}
@post("/login")
@view("login")
def do_login():
username = request.forms.username
password = request.forms.password
user = User.login(username, password)
if not user:
return {"error": "Invalid credentials."}
create_session(user)
return redirect("/")
# [END login]
# [START run]
run(host="localhost", port=8080, debug=True, reloader=True)
# [END run]
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from .imdb import imdb
from .shapenet import shapenet
from .shapenet_scene import shapenet_scene
from .rgbd_scenes import rgbd_scenes
from .lov import lov
from . import factory
import os.path as osp
ROOT_DIR = osp.join(osp.dirname(__file__), '..', '..')
# We assume your matlab binary is in your path and called `matlab'.
# If either is not true, just add it to your path and alias it as matlab, or
# you could change this file.
MATLAB = 'matlab'
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def _which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
"""
if _which(MATLAB) is None:
msg = ("MATLAB command '{}' not found. "
"Please add '{}' to your PATH.").format(MATLAB, MATLAB)
raise EnvironmentError(msg)
"""
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
latency = 3
#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
latency = 15
def define_options(parser):
return
def create_system(options, system, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MOESI_CMP_directory':
panic("This script requires the MOESI_CMP_directory protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc.
#
l1_cntrl_nodes = []
l2_cntrl_nodes = []
dir_cntrl_nodes = []
dma_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
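    # For example, with 4 L2 caches and 64-byte cache lines these give
    # l2_bits = 2 and block_size_bits = 6, so the L2 set index used below
    # starts at bit 8 (l2_index_start = block_size_bits + l2_bits).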
for i in xrange(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
#
l1i_cache = L1Cache(size = options.l1i_size,
assoc = options.l1i_assoc,
start_index_bit = block_size_bits,
is_icache = True)
l1d_cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits,
is_icache = False)
l1_cntrl = L1Cache_Controller(version = i,
L1Icache = l1i_cache,
L1Dcache = l1d_cache,
l2_select_num_bits = l2_bits,
send_evictions = (
options.cpu_type == "detailed"),
transitions_per_cycle = options.ports,
clk_domain=system.cpu[i].clk_domain,
ruby_system = ruby_system)
cpu_seq = RubySequencer(version = i,
icache = l1i_cache,
dcache = l1d_cache,
clk_domain=system.cpu[i].clk_domain,
ruby_system = ruby_system)
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
# Add controllers and sequencers to the appropriate lists
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controllers and the network
l1_cntrl.requestFromL1Cache = ruby_system.network.slave
l1_cntrl.responseFromL1Cache = ruby_system.network.slave
l1_cntrl.requestToL1Cache = ruby_system.network.master
l1_cntrl.responseToL1Cache = ruby_system.network.master
# added for 757 project
l1_cntrl.predictor = RubySnoopBasicPred()
l1_cntrl.sequencer = cpu_seq
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
#
        # First create the Ruby objects associated with this L2 cache
#
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = l2_index_start)
l2_cntrl = L2Cache_Controller(version = i,
L2cache = l2_cache,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
# Connect the L2 controllers and the network
l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
l2_cntrl.responseFromL2Cache = ruby_system.network.slave
l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
l2_cntrl.responseToL2Cache = ruby_system.network.master
l2_cntrl.sb = MulticastScoreboard()
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system.
# clk_divider value is a fix to pass regression.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain=ruby_system.clk_domain,
clk_divider=3)
for i in xrange(options.num_dirs):
#
# Create the Ruby objects associated with the directory controller
#
mem_cntrl = RubyMemoryControl(
clk_domain = ruby_system.memctrl_clk_domain,
version = i,
ruby_system = ruby_system)
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
directory = \
RubyDirectoryMemory(version = i,
size = dir_size,
use_map = options.use_map),
memBuffer = mem_cntrl,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controllers and the network
dir_cntrl.requestToDir = ruby_system.network.master
dir_cntrl.responseToDir = ruby_system.network.master
dir_cntrl.responseFromDir = ruby_system.network.slave
dir_cntrl.forwardFromDir = ruby_system.network.slave
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
dma_cntrl = DMA_Controller(version = i,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
|
#!/usr/bin/env python
import logging
l = logging.getLogger("simuvex.plugins.memory")
import claripy
from ..plugins.plugin import SimStatePlugin
class AddressWrapper(object):
"""
    AddressWrapper is used in SimAbstractMemory. It provides extra meta information for an address (or a ValueSet
    object) that is normalized from an integer/BVV/StridedInterval.
"""
def __init__(self, region, address, is_on_stack, function_address):
"""
Constructor for the class AddressWrapper.
:param region: Name of the memory regions it belongs to
:param address: An address (not a ValueSet object)
:param is_on_stack: Whether this address is on a stack region or not
:param function_address: Related function address (if any)
"""
self.region = region
self.address = address
self.is_on_stack = is_on_stack
self.function_address = function_address
def __hash__(self):
return hash((self.region, self.address))
def __eq__(self, other):
return self.region == other.region and self.address == other.address
def __repr__(self):
return "<%s> %s" % (self.region, hex(self.address))
class MemoryStoreRequest(object):
'''
A MemoryStoreRequest is used internally by SimMemory to track memory request data.
'''
def __init__(self, addr, data=None, size=None, condition=None, endness=None):
self.addr = addr
self.data = data
self.size = size
self.condition = condition
self.endness = endness
# was this store done?
self.completed = False
# stuff that's determined during handling
self.actual_addresses = None
self.constraints = [ ]
self.fallback_values = None
self.symbolic_sized_values = None
self.conditional_values = None
self.simplified_values = None
self.stored_values = None
class SimMemory(SimStatePlugin):
def __init__(self, endness=None, abstract_backer=None):
SimStatePlugin.__init__(self)
self.id = None
self.endness = "Iend_BE" if endness is None else endness
# Whether this memory is internally used inside SimAbstractMemory
self._abstract_backer = abstract_backer
def _resolve_location_name(self, name):
if self.id == 'reg':
return self.state.arch.registers[name]
elif name[0] == '*':
return self.state.registers.load(name[1:]), None
else:
raise SimMemoryError("Trying to address memory with a register name.")
def _convert_to_ast(self, data_e, size_e=None):
"""
Make an AST out of concrete @data_e
"""
if type(data_e) is str:
# Convert the string into a BitVecVal, *regardless of endness*
bits = len(data_e) * 8
data_e = self.state.BVV(data_e, bits)
elif type(data_e) in (int, long):
data_e = self.state.se.BVV(data_e, size_e*8 if size_e is not None
else self.state.arch.bits)
else:
data_e = data_e.to_bv()
return data_e
def store(self, addr, data, size=None, condition=None, add_constraints=None, endness=None, action=None):
'''
Stores content into memory.
@param addr: a claripy expression representing the address to store at
        @param data: the data to store (claripy expression or something convertible
                     to a claripy expression)
@param size: a claripy expression representing the size of the data to store
@param condition: (optional) a claripy expression representing a condition
if the store is conditional
@param add_constraints: add constraints resulting from the merge (default: True)
@param endness: The endianness for the data
@param action: a SimActionData to fill out with the final written value and constraints
'''
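        # A minimal usage sketch (assumes a SimState `state`; not part of the
        # original code):
        #   state.memory.store(0x1000, state.se.BVV(0x41424344, 32), endness='Iend_LE')
        #   val = state.memory.load(0x1000, 4, endness='Iend_LE')  # roughly <BV32 0x41424344>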
addr_e = _raw_ast(addr)
data_e = _raw_ast(data)
size_e = _raw_ast(size)
condition_e = _raw_ast(condition)
add_constraints = True if add_constraints is None else add_constraints
if isinstance(addr, str):
named_addr, named_size = self._resolve_location_name(addr)
addr = named_addr
addr_e = addr
if size is None:
size = named_size
size_e = size
# store everything as a BV
data_e = self._convert_to_ast(data_e)
if type(size_e) in (int, long):
size_e = self.state.se.BVV(size_e, self.state.arch.bits)
if self.id == 'reg': self.state._inspect('reg_write', BP_BEFORE, reg_write_offset=addr_e, reg_write_length=size_e, reg_write_expr=data_e)
if self.id == 'mem': self.state._inspect('mem_write', BP_BEFORE, mem_write_address=addr_e, mem_write_length=size_e, mem_write_expr=data_e)
request = MemoryStoreRequest(addr_e, data=data_e, size=size_e, condition=condition_e, endness=endness)
self._store(request)
if self.id == 'reg': self.state._inspect('reg_write', BP_AFTER)
if self.id == 'mem': self.state._inspect('mem_write', BP_AFTER)
if add_constraints and len(request.constraints) > 0:
self.state.add_constraints(*request.constraints)
if request.completed and o.AUTO_REFS in self.state.options and action is None:
ref_size = size if size is not None else (data_e.size() / 8)
action = SimActionData(self.state, self.id, 'write', addr=addr, data=data, size=ref_size, condition=condition)
self.state.log.add_action(action)
if request.completed and action is not None:
action.actual_addrs = request.actual_addresses
action.actual_value = action._make_object(request.stored_values[0]) # TODO
if len(request.constraints) > 0:
action.added_constraints = action._make_object(self.state.se.And(*request.constraints))
else:
action.added_constraints = action._make_object(self.state.se.true)
def _store(self, request):
raise NotImplementedError()
def store_cases(self, addr, contents, conditions, fallback=None, add_constraints=None, endness=None, action=None):
'''
Stores content into memory, conditional by case.
@param addr: a claripy expression representing the address to store at
@param contents: a list of bitvectors, not necessarily of the same size. Use
None to denote an empty write
@param conditions: a list of conditions. Must be equal in length to contents
@param fallback: (optional) a claripy expression representing what the write
should resolve to if all conditions evaluate to false (default:
whatever was there before)
@param add_constraints: add constraints resulting from the merge (default: True)
@param endness: the endianness for contents as well as fallback
@param action: a SimActionData to fill out with the final written value and constraints
'''
if fallback is None and all(c is None for c in contents):
l.debug("Avoiding an empty write.")
return
addr_e = _raw_ast(addr)
contents_e = _raw_ast(contents)
conditions_e = _raw_ast(conditions)
fallback_e = _raw_ast(fallback)
max_bits = max(c.length for c in contents_e if isinstance(c, claripy.ast.Bits)) if fallback is None else fallback.length
# if fallback is not provided by user, load it from memory
# remember to specify the endianness!
fallback_e = self.load(addr, max_bits/8, add_constraints=add_constraints, endness=endness) if fallback_e is None else fallback_e
req = self._store_cases(addr_e, contents_e, conditions_e, fallback_e, endness=endness)
if add_constraints:
self.state.add_constraints(*req.constraints)
if req.completed and o.AUTO_REFS in self.state.options and action is None:
action = SimActionData(self.state, self.id, 'write', addr=addr, data=req.stored_values[-1], size=max_bits/8, condition=self.state.se.Or(*conditions), fallback=fallback)
self.state.log.add_action(action)
if req.completed and action is not None:
action.actual_addrs = req.actual_addresses
action.actual_value = action._make_object(req.stored_values[-1])
action.added_constraints = action._make_object(self.state.se.And(*req.constraints) if len(req.constraints) > 0 else self.state.se.true)
def _store_cases(self, addr, contents, conditions, fallback, endness=None):
extended_contents = [ ]
for c in contents:
if c is None:
c = fallback
else:
need_bits = fallback.length - c.length
if need_bits > 0:
c = c.concat(fallback[need_bits-1:0])
extended_contents.append(c)
case_constraints = { }
for c,g in zip(extended_contents, conditions):
if c not in case_constraints:
case_constraints[c] = [ ]
case_constraints[c].append(g)
unique_contents = [ ]
unique_constraints = [ ]
for c,g in case_constraints.items():
unique_contents.append(c)
unique_constraints.append(self.state.se.Or(*g))
if len(unique_contents) == 1 and unique_contents[0] is fallback:
req = MemoryStoreRequest(addr, data=fallback, endness=endness)
return self._store(req)
else:
simplified_contents = [ ]
simplified_constraints = [ ]
for c,g in zip(unique_contents, unique_constraints):
simplified_contents.append(self.state.se.simplify(c))
simplified_constraints.append(self.state.se.simplify(g))
cases = zip(simplified_constraints, simplified_contents)
#cases = zip(unique_constraints, unique_contents)
ite = self.state.se.simplify(self.state.se.ite_cases(cases, fallback))
req = MemoryStoreRequest(addr, data=ite, endness=endness)
return self._store(req)
def load(self, addr, size=None, condition=None, fallback=None, add_constraints=None, action=None, endness=None):
'''
        Loads size bytes from addr.
        @param addr: the address to load from
@param size: the size (in bytes) of the load
@param condition: a claripy expression representing a condition for a conditional load
@param fallback: a fallback value if the condition ends up being False
@param add_constraints: add constraints resulting from the merge (default: True)
@param action: a SimActionData to fill out with the constraints
@param endness: the endness to load with
There are a few possible return values. If no condition or fallback are passed in,
then the return is the bytes at the address, in the form of a claripy expression.
For example:
<A BVV(0x41, 32)>
On the other hand, if a condition and fallback are provided, the value is conditional:
<A If(condition, BVV(0x41, 32), fallback)>
'''
add_constraints = True if add_constraints is None else add_constraints
addr_e = _raw_ast(addr)
size_e = _raw_ast(size)
condition_e = _raw_ast(condition)
fallback_e = _raw_ast(fallback)
if isinstance(addr, str):
named_addr, named_size = self._resolve_location_name(addr)
addr = named_addr
addr_e = addr
if size is None:
size = named_size
size_e = size
if size is None:
size = self.state.arch.bits / 8
size_e = size
if self.id == 'reg': self.state._inspect('reg_read', BP_BEFORE, reg_read_offset=addr_e, reg_read_length=size_e)
if self.id == 'mem': self.state._inspect('mem_read', BP_BEFORE, mem_read_address=addr_e, mem_read_length=size_e)
a,r,c = self._load(addr_e, size_e, condition=condition_e, fallback=fallback_e)
if add_constraints:
self.state.add_constraints(*c)
if (self.id == 'mem' and o.SIMPLIFY_MEMORY_READS in self.state.options) or \
(self.id == 'reg' and o.SIMPLIFY_REGISTER_READS in self.state.options):
l.debug("simplifying %s read...", self.id)
r = self.state.simplify(r)
if not self._abstract_backer and \
self.id != 'reg' and \
o.UNINITIALIZED_ACCESS_AWARENESS in self.state.options and \
self.state.uninitialized_access_handler is not None and \
(r.op == 'Reverse' or r.op == 'I') and \
hasattr(r.model, 'uninitialized') and \
r.model.uninitialized:
normalized_addresses = self.normalize_address(addr)
if len(normalized_addresses) > 0 and type(normalized_addresses[0]) is AddressWrapper:
normalized_addresses = [ (aw.region, aw.address) for aw in normalized_addresses ]
self.state.uninitialized_access_handler(self.id, normalized_addresses, size, r, self.state.scratch.bbl_addr, self.state.scratch.stmt_idx)
# the endness
endness = self.endness if endness is None else endness
if endness == "Iend_LE":
r = r.reversed
if self.id == 'mem': self.state._inspect('mem_read', BP_AFTER, mem_read_expr=r)
if self.id == 'reg': self.state._inspect('reg_read', BP_AFTER, reg_read_expr=r)
if o.AST_DEPS in self.state.options and self.id == 'reg':
r = SimActionObject(r, reg_deps=frozenset((addr,)))
if o.AUTO_REFS in self.state.options and action is None:
ref_size = size if size is not None else (r.size() / 8)
action = SimActionData(self.state, self.id, 'read', addr=addr, data=r, size=ref_size, condition=condition, fallback=fallback)
self.state.log.add_action(action)
if action is not None:
action.actual_addrs = a
action.added_constraints = action._make_object(self.state.se.And(*c) if len(c) > 0 else self.state.se.true)
return r
def normalize_address(self, addr, is_write=False): #pylint:disable=no-self-use,unused-argument
'''
Normalizes the address for use in static analysis (with the abstract memory
model). In non-abstract mode, simply returns the address in a single-element
list.
'''
return [ addr ]
def _load(self, addr, size, condition=None, fallback=None):
raise NotImplementedError()
def find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None):
'''
        Returns the address of bytes equal to 'what', starting from 'addr'. Note that,
if you don't specify a default value, this search could cause the state to go
unsat if no possible matching byte exists.
        @param addr: the start address
@param what: what to search for
@param max_search: search at most this many bytes
@param max_symbolic_bytes: search through at most this many symbolic bytes
@param default: the default value, if what you're looking for wasn't found
@returns an expression representing the address of the matching byte
'''
addr = _raw_ast(addr)
what = _raw_ast(what)
default = _raw_ast(default)
r,c,m = self._find(addr, what, max_search=max_search, max_symbolic_bytes=max_symbolic_bytes, default=default)
if o.AST_DEPS in self.state.options and self.id == 'reg':
r = SimActionObject(r, reg_deps=frozenset((addr,)))
return r,c,m
def _find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None):
raise NotImplementedError()
def copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None):
'''
Copies data within a memory.
@param dst: claripy expression representing the address of the destination
@param src: claripy expression representing the address of the source
@param src_memory: (optional) copy data from this SimMemory instead of self
        @param dst_memory: (optional) copy data to this SimMemory instead of self
@param size: claripy expression representing the size of the copy
@param condition: claripy expression representing a condition, if the write should
be conditional. If this is determined to be false, the size of
the copy will be 0
'''
dst = _raw_ast(dst)
src = _raw_ast(src)
size = _raw_ast(size)
condition = _raw_ast(condition)
return self._copy_contents(dst, src, size, condition=condition, src_memory=src_memory, dst_memory=dst_memory)
def _copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None):
raise NotImplementedError()
from .. import s_options as o
from ..s_action import SimActionData
from ..s_action_object import SimActionObject, _raw_ast
from ..s_errors import SimMemoryError
from ..plugins.inspect import BP_BEFORE, BP_AFTER
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <[email protected]>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
import gc
import leveldb
import logging
import StringIO
import marshal as pickle
from hugin import numencode
from hugin.relsearch import REL_ID_NAME_MAP
from hugin.relsearch import REL_NAME_ID_MAP
class ArgType(object):
NONE = -1
EMPTY = -2
STR_NONE = "<NONE>"
STR_EMPTY = "<->"
POS_DELIMITER = "-"
POS_NONE = "POS>"
class TripleReader(object):
def parse_triple_row(self, ts_row):
arguments = []
for i in range(1, (len(ts_row) - 1)):
argument = ts_row[i]
if argument == ArgType.STR_NONE:
arguments.append(ArgType.NONE)
elif argument == ArgType.STR_EMPTY:
arguments.append(ArgType.EMPTY)
else:
lemma_pos = argument.split(ArgType.POS_DELIMITER)
if lemma_pos[-1] == ArgType.POS_NONE:
arguments.append(ArgType.NONE)
else:
arguments.append("-".join(lemma_pos[0:(len(lemma_pos) - 1)]))
return ts_row[0], arguments, int(ts_row[-1])
def iter_triples(self, i_file):
for line in i_file:
row = line.split(", ")
triple = self.parse_triple_row(row)
yield triple
class TripleIndex(object):
def __init__(self, data_dir):
# term = str()
# triple = str()
# args(triple) = (int)
self.data_dir = data_dir
        # table: term -> id(term)
        self.term_id_map = None
        # table: id(triple) -> triple stamp (rel id, arg ids, freq)
        self.triple_id_map = None
        # table: id(term) -> posting list of (id(triple), argument position) pairs
self.arg_cache = None
self.rel_id_map = REL_NAME_ID_MAP
self.id_rel_map = REL_ID_NAME_MAP
try:
import lz4 as compressor
self.compress = compressor.compress
self.compressHC = compressor.compressHC
self.decompress = compressor.decompress
except ImportError:
import zlib as compressor
self.compress = lambda data: compressor.compress(data, 3)
self.compressHC = lambda data: compressor.compress(data, 9)
self.decompress = lambda data: compressor.decompress(data)
@staticmethod
def triple2stamp(triple, term_id_map):
rel_name = triple[0]
rel_id = REL_NAME_ID_MAP[rel_name]
args = triple[1]
stamp = [rel_id]
for arg in args:
if arg == ArgType.NONE:
stamp.append(arg)
elif arg != ArgType.EMPTY:
stamp.append(term_id_map[arg])
stamp.append(triple[-1])
return tuple(stamp)
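    # For example, a triple (rel, ['dog', ArgType.NONE, 'bone'], 7) becomes the
    # stamp (REL_NAME_ID_MAP[rel], id('dog'), -1, id('bone'), 7): EMPTY
    # arguments are dropped and NONE arguments are kept as -1.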
@staticmethod
def stamp2triple(stamp, id_term_map, map_none=False):
triple = [REL_ID_NAME_MAP[stamp[0]]]
for i in range(1, len(stamp) - 1):
if stamp[i] >= 0:
triple.append(id_term_map[stamp[i]])
else:
if map_none:
triple.append("<NONE>")
else:
triple.append(stamp[i])
triple.append(stamp[-1])
return triple
@staticmethod
def stamp_arg(stamp):
return stamp[1: len(stamp) - 1]
def __commit_triples(self, batch_size=64):
triple_store = leveldb.LevelDB("%s/triple.db" % self.data_dir)
batch = []
tr_id = 0
batch_key = 0
while tr_id < len(self.triple_id_map):
batch.append(self.triple_id_map[tr_id])
if tr_id % batch_size == batch_size - 1:
batch_data = self.compressHC(pickle.dumps(batch))
triple_store.Put(numencode.encode_uint(batch_key), batch_data)
batch = []
batch_key += 1
tr_id += 1
if len(batch) > 0:
batch_data = self.compressHC(pickle.dumps(batch))
triple_store.Put(numencode.encode_uint(batch_key), batch_data)
def load_triples(self, batch_size=64):
id_triple_map = dict()
triple_store = leveldb.LevelDB("%s/triple.db" % self.data_dir)
for batch_key, batch_data in triple_store.RangeIter():
batch = pickle.loads(self.decompress(batch_data))
batch_key = numencode.decode_uint(batch_key)
for i in xrange(len(batch)):
tr_id = batch_key * batch_size + i
id_triple_map[tr_id] = batch[i]
logging.info("INDEX: LOADED %d TRIPLES" % len(id_triple_map))
return id_triple_map
def __commit_terms(self, batch_size=64):
term_store = leveldb.LevelDB("%s/term.db" % self.data_dir)
batch = []
term_id = 0
batch_key = 0
while term_id < len(self.term_id_map):
batch.append(self.id_term_map[term_id])
if term_id % batch_size == batch_size - 1:
batch_data = self.compressHC(pickle.dumps(batch))
term_store.Put(numencode.encode_uint(batch_key), batch_data)
batch = []
batch_key += 1
term_id += 1
if len(batch) > 0:
batch_data = self.compressHC(pickle.dumps(batch))
term_store.Put(numencode.encode_uint(batch_key), batch_data)
def load_terms(self, batch_size=64):
id_term_map = dict()
term_store = leveldb.LevelDB("%s/term.db" % self.data_dir)
for batch_key, batch_data in term_store.RangeIter():
batch = pickle.loads(self.decompress(batch_data))
batch_key = numencode.decode_uint(batch_key)
for i in xrange(len(batch)):
term_id = batch_key * batch_size + i
id_term_map[term_id] = batch[i]
logging.info("INDEX: LOADED %d TERMS" % len(id_term_map))
return id_term_map
def decode_posting_list(self, plist_blob):
plist = numencode.decode_plist(self.decompress(plist_blob))
return plist
def encode_posting_list(self, plist):
return self.compressHC(numencode.encode_plist(plist))
def update_posting_list(self, old_plist_blob, new_plist):
plist_blob = self.decompress(old_plist_blob)
updated_plist = numencode.update_plist(plist_blob, new_plist)
return self.compressHC(updated_plist)
def __update_arg_index(self):
w_batch = leveldb.WriteBatch()
arg_index = leveldb.LevelDB("%s/arg.index" % self.data_dir)
for term_id, plist in self.arg_cache.iteritems():
term_key = numencode.encode_uint(term_id)
try:
old_plist_blob = arg_index.Get(term_key)
except KeyError:
old_plist_blob = None
if old_plist_blob is None:
plist_blob = self.encode_posting_list(plist)
else:
plist_blob = self.update_posting_list(old_plist_blob, plist)
w_batch.Put(term_key, plist_blob)
arg_index.Write(w_batch, sync=True)
def __cache_triple(self, triple_stamp):
tr_id = len(self.triple_id_map)
self.triple_id_map.append(triple_stamp)
return tr_id
def __cache_term(self, term):
if term not in self.term_id_map:
term_id = len(self.term_id_map)
self.id_term_map.append(term)
self.term_id_map[term] = term_id
def __cache_arg_posting_list(self, triple_id, stamp):
for i in range(1, len(stamp) - 1):
if stamp[i] >= 0:
if stamp[i] in self.arg_cache:
self.arg_cache[stamp[i]].append((triple_id, i))
else:
self.arg_cache[stamp[i]] = [(triple_id, i)]
def create_index(self, triples, threshold=10, cache_size=1000 ** 2):
i = 0
self.id_term_map = []
self.term_id_map = dict()
self.triple_id_map = []
self.arg_cache = dict()
cached = 0
logging.info("starting creating index")
for triple in triples:
args = triple[1]
freq = triple[-1]
for term in args:
if isinstance(term, basestring):
self.__cache_term(term)
stamp = self.triple2stamp(triple, self.term_id_map)
if freq > threshold:
i += 1
tr_id = self.__cache_triple(stamp)
self.__cache_arg_posting_list(tr_id, stamp)
cached += 1
if cached > cache_size:
logging.info("%dM triples done, flushing cache" % i)
self.__update_arg_index()
cached = 0
self.arg_cache = dict()
gc.collect()
self.__commit_terms()
self.__commit_triples()
self.__update_arg_index()
self.arg_cache = dict()
self.term_id_map = dict()
self.triple_id_map = []
def arg_index(self):
return leveldb.LevelDB("%s/arg.index" % self.data_dir)
class TripleSearchEngine(object):
def __init__(self, triple_index):
self.index = triple_index
self.id_term_map = triple_index.load_terms()
self.term_id_map = dict()
self.id_triple_map = triple_index.load_triples()
for term_id, term in self.id_term_map.iteritems():
self.term_id_map[term] = term_id
self.arg_index = triple_index.arg_index()
def search(self, rel_type=None, arg_query=()):
norm_query = []
for arg in arg_query:
if isinstance(arg, list) or isinstance(arg, tuple):
term, pos = arg
if isinstance(term, basestring):
if isinstance(term, unicode):
term = term.encode("utf-8")
term_id = self.term_id_map.get(term)
else:
term_id = term
elif isinstance(arg, basestring):
term, pos = arg, -1
if isinstance(term, unicode):
term = term.encode("utf-8")
term_id = self.term_id_map.get(term)
elif isinstance(arg, int):
term_id, pos = arg, -1
else:
term_id, pos = None, -1
if term_id is not None and term_id in self.id_term_map:
norm_query.append((term_id, pos))
results = None
for term_id, pos in norm_query:
try:
plist_blob = self.arg_index.Get(numencode.encode_uint(term_id))
plist = self.index.decode_posting_list(plist_blob)
except KeyError:
plist = []
if pos != -1:
plist = filter(lambda plist_el: plist_el[1] == pos, plist)
plist = [plist_el[0] for plist_el in plist]
plist = set(plist)
if results is None:
results = plist
else:
results &= plist
if results is None:
return ()
results = [self.id_triple_map[triple_id] for triple_id in results]
if rel_type is not None:
results = filter(lambda triple: triple[0] == rel_type, results)
return results
def print_result(self, search_result, max_results=10):
for triple in search_result[:max_results]:
triple_str = "<Triple(%s, " % self.index.id_rel_map[triple[0]]
for i in range(1, len(triple) - 1):
if triple[i] >= 0:
triple_str += "%s, " % self.id_term_map[triple[i]]
else:
triple_str += "NONE, "
triple_str += " %d>" % triple[-1]
print triple_str
def pprint(self, triple):
pstr = StringIO.StringIO()
pstr.write("{")
pstr.write(REL_ID_NAME_MAP[triple[0]])
pstr.write(";")
terms = ";".join([self.id_term_map[term_id] if term_id >= 0 else "NONE" for term_id in triple[1:-1]])
pstr.write(terms)
pstr.write("}")
return pstr.getvalue()
class SimpleObjectIndex(object):
def __init__(self, data_dir, obj_to_terms, obj_to_str, str_to_obj):
self.data_dir = data_dir
self.obj_to_terms = obj_to_terms
self.obj_to_str = obj_to_str
self.str_to_obj = str_to_obj
self.id_term_map = None
self.term_id_map = None
self.objnum = 0
try:
import lz4 as compressor
self.compress = compressor.compress
self.compressHC = compressor.compressHC
self.decompress = compressor.decompress
except ImportError:
import zlib as compressor
self.compress = lambda data: compressor.compress(data, 3)
self.compressHC = lambda data: compressor.compress(data, 9)
self.decompress = lambda data: compressor.decompress(data)
def load_all(self):
id_term_map = self.load_terms()
self.id_term_map = [None] * len(id_term_map)
self.term_id_map = dict()
for term_id, term in id_term_map.iteritems():
self.id_term_map[term_id] = term
self.term_id_map[term] = term_id
self.objnum = self.load_objnum()
def load_objnum(self):
objnum_fl_path = "%s/OBJNUM" % self.data_dir
try:
with open(objnum_fl_path, "r") as objnum_fl:
objnum = int(objnum_fl.read())
except IOError:
objnum = 0
logging.info("LOADED DOCNUM %d" % objnum)
return objnum
def update_objnum(self, new_objnum):
objnum_fl_path = "%s/OBJNUM" % self.data_dir
prev_objnum = self.load_objnum()
with open(objnum_fl_path, "w") as objnum_fl:
objnum_fl.write(str(new_objnum))
logging.info("OBJNUM updated %d => %d [+%d]" % (prev_objnum, new_objnum, new_objnum - prev_objnum))
return new_objnum - prev_objnum
def decode_posting_list(self, plist_blob):
plist = numencode.decode_1d_plist(self.decompress(plist_blob))
return plist
def encode_posting_list(self, plist):
return self.compressHC(numencode.encode_1d_plist(plist))
def update_posting_list(self, old_plist_blob, new_plist):
plist_blob = self.decompress(old_plist_blob)
updated_plist = numencode.update_1d_plist(plist_blob, new_plist)
return self.compressHC(updated_plist)
def update_posting_lists(self, post_lists):
plist_store = leveldb.LevelDB("%s/plist.index" % self.data_dir)
w_batch = leveldb.WriteBatch()
upd_num = 0
new_num = 0
for term_id, plist in post_lists.iteritems():
term_key = numencode.encode_uint(term_id)
try:
old_plist_blob = plist_store.Get(term_key)
upd_num += 1
except KeyError:
new_num += 1
old_plist_blob = None
if old_plist_blob is None:
plist_blob = self.encode_posting_list(plist)
else:
plist_blob = self.update_posting_list(old_plist_blob, plist)
w_batch.Put(term_key, plist_blob)
plist_store.Write(w_batch, sync=True)
logging.info("updated %d plists, %d new" % (upd_num, new_num))
def load_posting_list(self, term_id, plist_store):
term_key = numencode.encode_uint(term_id)
plist_blob = plist_store.Get(term_key)
plist = self.decode_posting_list(plist_blob)
return plist
def write_objects(self, id_object_map):
object_store = leveldb.LevelDB("%s/object.db" % self.data_dir)
w_batch = leveldb.WriteBatch()
for obj_id, obj in id_object_map:
obj_str = self.obj_to_str(obj)
obj_blob = self.compressHC(obj_str)
obj_key = numencode.encode_uint(obj_id)
w_batch.Put(obj_key, obj_blob)
object_store.Write(w_batch, sync=True)
logging.info("wrote %d objects" % len(id_object_map))
self.update_objnum(self.objnum)
def load_object(self, obj_id, obj_store):
obj_key = numencode.encode_uint(obj_id)
obj_blob = obj_store.Get(obj_key)
obj_str = self.decompress(obj_blob)
obj = self.str_to_obj(obj_str)
return obj
def write_terms(self, id_term_map, batch_size=64):
term_store = leveldb.LevelDB("%s/term.db" % self.data_dir)
batch = []
term_id = 0
batch_key = 0
while term_id < len(id_term_map):
batch.append(id_term_map[term_id])
if term_id % batch_size == batch_size - 1:
batch_data = self.compressHC(pickle.dumps(batch))
term_store.Put(numencode.encode_uint(batch_key), batch_data)
batch = []
batch_key += 1
term_id += 1
if len(batch) > 0:
batch_data = self.compressHC(pickle.dumps(batch))
term_store.Put(numencode.encode_uint(batch_key), batch_data)
logging.info("wrote %d terms" % len(id_term_map))
def load_terms(self, batch_size=64):
id_term_map = dict()
term_store = leveldb.LevelDB("%s/term.db" % self.data_dir)
for batch_key, batch_data in term_store.RangeIter():
batch = pickle.loads(self.decompress(batch_data))
batch_key = numencode.decode_uint(batch_key)
for i in xrange(len(batch)):
term_id = batch_key * batch_size + i
id_term_map[term_id] = batch[i]
logging.info("INDEX: LOADED %d TERMS" % len(id_term_map))
return id_term_map
def index_term(self, term, object_id, post_lists):
term_id = self.term_id_map.get(term, -1)
if term_id == -1:
term_id = len(self.term_id_map)
self.term_id_map[term] = term_id
self.id_term_map.append(term)
plist = post_lists.get(term_id, -1)
if plist == -1:
post_lists[term_id] = [object_id]
else:
plist.append(object_id)
def update_index(self, objects, cache_size=(200000, 80000000)):
post_lists = dict()
id_obj_map = []
cached = 0
logging.info("starting creating index")
for obj in objects:
terms = self.obj_to_terms(obj)
for term in terms:
self.index_term(term, self.objnum, post_lists)
cached += 1
if cached > cache_size[1]:
self.update_posting_lists(post_lists)
post_lists = dict()
cached = 0
id_obj_map.append((self.objnum, obj))
if len(id_obj_map) > cache_size[0]:
self.write_objects(id_obj_map)
id_obj_map = []
self.objnum += 1
self.write_objects(id_obj_map)
self.update_posting_lists(post_lists)
self.write_terms(self.id_term_map)
logging.info("index done")
def find(self, query_terms_cnf=None):
for query_terms in query_terms_cnf:
plist_store = leveldb.LevelDB("%s/plist.index" % self.data_dir)
object_store = leveldb.LevelDB("%s/object.db" % self.data_dir)
if query_terms is None:
continue
result_ids = set()
for query_term in query_terms:
term_id = self.term_id_map.get(query_term, -1)
logging.info("TERM ID: %d" % term_id)
if term_id == -1:
logging.info("TERM NOT FOUND IN DICTIONARY")
continue
plist = self.load_posting_list(term_id, plist_store)
result_ids.update(plist)
logging.info("RETRIEVING %d OBJECTS FROM DISK" % len(result_ids))
for obj_id in result_ids:
obj = self.load_object(obj_id, object_store)
yield obj
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ranker', '0008_auto_20150425_2101'),
]
operations = [
migrations.AddField(
model_name='project',
name='likert_label_1',
field=models.CharField(max_length=200, default='Strongly disagree'),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='likert_label_2',
field=models.CharField(max_length=200, default='Disagree'),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='likert_label_3',
field=models.CharField(max_length=200, default='Somewhat disagree'),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='likert_label_4',
field=models.CharField(max_length=200, default='Neither agree nor disagree'),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='likert_label_5',
field=models.CharField(max_length=200, default='Somewhat agree'),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='likert_label_6',
field=models.CharField(max_length=200, default='Agree'),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='likert_label_7',
field=models.CharField(max_length=200, default='Strongly agree'),
preserve_default=False,
),
]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import logging
import time
import acos_client
from a10_neutron_lbaas import a10_exceptions as ex
from a10_neutron_lbaas.db import models
from a10_neutron_lbaas.vthunder import instance_initialization
from a10_neutron_lbaas.vthunder import instance_manager
import base
LOG = logging.getLogger(__name__)
# This next set of plumbing hooks needs to be used when the vthunder
# scheduler is active, for one vthunder per tenant.
class VThunderPerTenantPlumbingHooks(base.BasePlumbingHooks):
def get_a10_client(self, device_info, **kwargs):
if kwargs.get('action', None) == 'create':
retry = [errno.EHOSTUNREACH, errno.ECONNRESET, errno.ECONNREFUSED, errno.ETIMEDOUT]
return acos_client.Client(
device_info['host'], device_info['api_version'],
device_info['username'], device_info['password'],
port=device_info['port'], protocol=device_info['protocol'],
retry_errno_list=retry)
else:
return super(VThunderPerTenantPlumbingHooks, self).get_a10_client(device_info, **kwargs)
def _instance_manager(self, a10_context):
return instance_manager.InstanceManager.from_config(
self.driver.config, a10_context.openstack_context)
def _create_instance(self, tenant_id, a10_context, lbaas_obj, db_session):
start = time.time()
cfg = self.driver.config
vth = cfg.get_vthunder_config()
imgr = self._instance_manager(a10_context)
instance = imgr.create_device_instance(vth)
end = time.time()
LOG.debug("A10 vThunder %s: spawned after %d seconds", instance['nova_instance_id'],
end - start)
from a10_neutron_lbaas.etc import defaults
device_config = {}
for key in vth:
if key in ['status', 'ha_sync_list']:
continue
if key in defaults.DEVICE_REQUIRED_FIELDS or key in defaults.DEVICE_OPTIONAL_DEFAULTS:
device_config[key] = vth[key]
device_config.update({
'tenant_id': tenant_id,
'nova_instance_id': instance['nova_instance_id'],
'name': instance['name'],
'host': instance['ip_address'],
})
models.A10DeviceInstance.create_and_save(
db_session=db_session,
**device_config)
device_config.update({
'_perform_initialization': True
})
return device_config
def _wait_for_instance(self, device_config):
start = time.time()
client = self.get_a10_client(device_config)
client.wait_for_connect()
end = time.time()
# XXX(dougwig) - this is a <=4.1.0 after CM bug is fixed
time.sleep(5.0)
LOG.debug("A10 vThunder %s: ready to connect after %d seconds",
device_config['nova_instance_id'], end - start)
def select_device_with_lbaas_obj(self, tenant_id, a10_context, lbaas_obj,
db_session=None, **kwargs):
if not self.driver.config.get('use_database'):
raise ex.RequiresDatabase('vThunder orchestration requires use_database=True')
# If we already have a vThunder, use it.
# one vthunder per tenant
missing_instance = (
'A10 instance mapped to tenant %s is not present in db; '
'add it back to config or migrate loadbalancers' % tenant_id
)
tb = models.A10TenantBinding.find_by_tenant_id(tenant_id, db_session=db_session)
if tb is not None:
d = self.driver.config.get_device(tb.device_name, db_session=db_session)
if d is None:
LOG.error(missing_instance)
raise ex.InstanceMissing(missing_instance)
LOG.debug("select_device, returning cached instance %s", d)
return d
# No? Then we need to create one.
if kwargs.get('action') != 'create':
LOG.error(missing_instance)
raise ex.InstanceMissing(missing_instance)
device_config = self._create_instance(tenant_id, a10_context, lbaas_obj, db_session)
self._wait_for_instance(device_config)
# Now make sure that we remember where it is.
models.A10TenantBinding.create_and_save(
tenant_id=tenant_id,
device_name=device_config['name'],
db_session=db_session)
LOG.debug("select_device, returning new instance %s", device_config)
return device_config
def after_select_partition(self, a10_context):
instance = a10_context.device_cfg
client = a10_context.client
LOG.debug("after_select_partition, checking instance %s", instance)
if instance.get('_perform_initialization'):
instance_initialization.initialize_vthunder(
a10_context.a10_driver.config, instance, client)
def after_vip_create(self, a10_context, os_context, vip):
instance = a10_context.device_cfg
if 'nova_instance_id' not in instance:
raise ex.InternalError('Attempting virtual plumbing on non-virtual device')
if hasattr(vip, 'vip_address'):
vip_ip_address = vip.vip_address
vip_subnet_id = vip.vip_subnet_id
else:
vip_ip_address = vip['address']
vip_subnet_id = vip['subnet_id']
imgr = self._instance_manager(a10_context)
return imgr.plumb_instance_subnet(
instance['nova_instance_id'],
vip_subnet_id,
[vip_ip_address],
wrong_ips=[instance['host']])
|
import hiredis
import string
from .exceptions import InvalidData, ResponseError, ConnectionError
from .api import RedisApiMixin
from twisted.protocols.basic import LineReceiver
from twisted.protocols import policies
from twisted.python import log
from twisted.internet.defer import inlineCallbacks, DeferredQueue, returnValue
# Possible first characters in a string containing an integer or a float.
_NUM_FIRST_CHARS = frozenset(string.digits + "+-.")
class RedisProtocol(LineReceiver, policies.TimeoutMixin, RedisApiMixin):
"""
Redis client protocol.
"""
delimiter = '\r\n'
MAX_LENGTH = 16384
def __init__(self, charset="utf-8", errors="strict"):
self._reader = hiredis.Reader(
protocolError=InvalidData, replyError=ResponseError
)
self.charset = charset
self.errors = errors
self.bulk_length = 0
self.bulk_buffer = []
self.post_proc = []
self.replyQueue = DeferredQueue()
self.transactions = 0
self.inTransaction = False
self.unwatch_cc = lambda: ()
self.commit_cc = lambda: ()
self.script_hashes = set()
self.pipelining = False
self.pipelined_commands = []
self.pipelined_replies = []
@inlineCallbacks
def connectionMade(self):
if self.factory.password is not None:
try:
response = yield self.auth(self.factory.password)
if isinstance(response, ResponseError):
raise response
except Exception, e:
self.factory.continueTrying = False
self.transport.loseConnection()
msg = "Redis error: could not auth: %s" % (str(e))
self.factory.connectionError(msg)
if self.factory.isLazy:
log.msg(msg)
returnValue(None)
if self.factory.dbid is not None:
try:
response = yield self.select(self.factory.dbid)
if isinstance(response, ResponseError):
raise response
except Exception, e:
self.factory.continueTrying = False
self.transport.loseConnection()
msg = "Redis error: could not set dbid=%s: %s" % (
self.factory.dbid, str(e)
)
self.factory.connectionError(msg)
if self.factory.isLazy:
log.msg(msg)
returnValue(None)
self.connected = 1
self.factory.addConnection(self)
def connectionLost(self, why):
self.connected = 0
self.script_hashes.clear()
self.factory.delConnection(self)
LineReceiver.connectionLost(self, why)
while self.replyQueue.waiting:
self.replyReceived(ConnectionError("Lost connection"))
def dataReceived(self, data, unpause=False):
self.resetTimeout()
if data:
self._reader.feed(data)
res = self._reader.gets()
while res is not False:
if isinstance(res, basestring):
res = self.tryConvertData(res)
elif isinstance(res, list):
res = map(self.tryConvertData, res)
if res == "QUEUED":
self.transactions += 1
else:
res = self.handleTransactionData(res)
self.replyReceived(res)
res = self._reader.gets()
def tryConvertData(self, data):
if not isinstance(data, str):
return data
el = None
if data and data[0] in _NUM_FIRST_CHARS: # Most likely a number
try:
el = int(data) if data.find('.') == -1 else float(data)
except ValueError:
pass
if el is None:
el = data
if self.charset is not None:
try:
el = data.decode(self.charset)
except UnicodeDecodeError:
pass
return el
def handleTransactionData(self, reply):
# watch or multi has been called
if self.inTransaction and isinstance(reply, list):
if self.transactions > 0:
# multi: this must be an exec [commit] reply
self.transactions -= len(reply)
if self.transactions == 0:
self.commit_cc()
# watch but no multi: process the reply as usual
if self.inTransaction:
f = self.post_proc[1:]
if len(f) == 1 and callable(f[0]):
reply = f[0](reply)
else: # multi: this must be an exec reply
tmp = []
for f, v in zip(self.post_proc[1:], reply):
if callable(f):
tmp.append(f(v))
else:
tmp.append(v)
reply = tmp
self.post_proc = []
return reply
def replyReceived(self, reply):
"""
Complete reply received and ready to be pushed to the requesting
function.
"""
self.replyQueue.put(reply)
@staticmethod
def handle_reply(r):
if isinstance(r, Exception):
raise r
return r
class MonitorProtocol(RedisProtocol):
"""
monitor has the same behavior as subscribe: hold the connection until
something happens.
take care with the performance impact: http://redis.io/commands/monitor
"""
def messageReceived(self, message):
pass
def replyReceived(self, reply):
self.messageReceived(reply)
def monitor(self):
return self.execute_command("MONITOR")
def stop(self):
self.transport.loseConnection()
class SubscriberProtocol(RedisProtocol):
def messageReceived(self, pattern, channel, message):
pass
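    # Redis pushes pub/sub data as list replies: plain messages arrive as
    # ['message', <channel>, <payload>] and pattern messages as
    # ['pmessage', <pattern>, <channel>, <payload>]; anything else (e.g.
    # subscribe/unsubscribe confirmations) is queued as an ordinary reply.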
def replyReceived(self, reply):
if isinstance(reply, list):
if reply[-3] == u"message":
self.messageReceived(None, *reply[-2:])
elif len(reply) > 3 and reply[-4] == u"pmessage":
self.messageReceived(*reply[-3:])
else:
self.replyQueue.put(reply[-3:])
elif isinstance(reply, Exception):
self.replyQueue.put(reply)
def subscribe(self, channels):
if isinstance(channels, (str, unicode)):
channels = [channels]
return self.execute_command("SUBSCRIBE", *channels)
def unsubscribe(self, channels):
if isinstance(channels, (str, unicode)):
channels = [channels]
return self.execute_command("UNSUBSCRIBE", *channels)
def psubscribe(self, patterns):
if isinstance(patterns, (str, unicode)):
patterns = [patterns]
return self.execute_command("PSUBSCRIBE", *patterns)
def punsubscribe(self, patterns):
if isinstance(patterns, (str, unicode)):
patterns = [patterns]
return self.execute_command("PUNSUBSCRIBE", *patterns)
|
from ..base import *
from ..conversions import *
from ..func_utils import *
from pyjsparser import parse
from ..byte_trans import ByteCodeGenerator, Code
def Function(this, args):
# convert arguments to python list of strings
a = map(to_string, tuple(args))
_body = u';'
_args = ()
if len(a):
_body = u'%s;' % a[-1]
_args = a[:-1]
return executable_function(_body, _args, args.space, global_context=True)
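# For example, calling the JS Function constructor with arguments
# ('a', 'b', 'return a+b') builds and compiles
# u'(function (a, b) { ; return a+b; ; });' via executable_function below.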
def executable_function(_body, _args, space, global_context=True):
func_str = u'(function (%s) { ; %s ; });' % (u', '.join(_args), _body)
co = executable_code(
code_str=func_str, space=space, global_context=global_context)
return co()
# You can use this lovely piece of machinery to compile and execute code on the fly! Watch out though, as it may generate lots of code.
# todo: tape cleanup? We don't know which pieces are still needed, so cleanup is rather impossible without smarter machinery (something like GC);
# one solution would be to keep a separate tape for functions.
def executable_code(code_str, space, global_context=True):
# parse first to check if any SyntaxErrors
parsed = parse(code_str)
old_tape_len = len(space.byte_generator.exe.tape)
space.byte_generator.record_state()
start = space.byte_generator.exe.get_new_label()
skip = space.byte_generator.exe.get_new_label()
space.byte_generator.emit('JUMP', skip)
space.byte_generator.emit('LABEL', start)
space.byte_generator.emit(parsed)
space.byte_generator.emit('NOP')
space.byte_generator.emit('LABEL', skip)
space.byte_generator.emit('NOP')
space.byte_generator.restore_state()
space.byte_generator.exe.compile(
start_loc=old_tape_len
    ) # don't read the code from the beginning, don't be stupid!
ctx = space.GlobalObj if global_context else space.exe.current_ctx
def ex_code():
ret, status, token = space.byte_generator.exe.execute_fragment_under_context(
ctx, start, skip)
# todo Clean up the tape!
        # this is NOT the way to do that, because the fragment may itself contain executable code that we don't want to remove:
#del space.byte_generator.exe.tape[old_tape_len:]
if status == 0:
return ret
elif status == 3:
raise token
else:
raise RuntimeError(
'Unexpected return status during JIT execution: %d' % status)
return ex_code
def _eval(this, args):
code_str = to_string(get_arg(args, 0))
return executable_code(code_str, args.space, global_context=True)()
def log(this, args):
print(' '.join(map(to_string, args)))
return undefined
|
"""
Number theory functions.
"""
import sys
from functools import reduce
from itertools import count, islice
from math import sqrt, gcd
from operator import mul
def prod(seq):
return reduce(mul, seq, 1)
def is_prime(n):
if n < 2 or n%2==0:
return n==2
for m in range(3,int(sqrt(n))+1,2):
if n%m==0:
return False
return True
def primes_less_than(m):
primes = []
for n in range(2,m):
found_prime = True
for p in primes:
if p*p > n:
break
if n % p == 0:
found_prime = False
break
if found_prime:
primes.append(n)
return primes
def primes():
"""
Generate prime numbers
"""
primes = []
for n in count(2):
found_prime = True
for p in primes:
if p*p > n:
break
if n % p == 0:
found_prime = False
break
if found_prime:
primes.append(n)
yield n
def nth(seq, n):
return next(islice(seq, n-1, None))
def even(n):
return n%2 == 0
def odd(n):
return n%2 == 1
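# Illustrative checks added in the style of the test_ helpers further down
# (not part of the original module); the expected values are standard facts
# about the primes below 20.
def test_primes():
    assert primes_less_than(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert is_prime(19) and not is_prime(21)
    assert nth(primes(), 6) == 13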
def factor(n):
"""
Factor an integer n returning a list of prime factors
"""
f = 2
fs = iter(range(3, int(sqrt(n))+1, 2))
factors = []
r = n
try:
while r > 1:
while r%f==0:
r = r//f
factors.append(f)
f = next(fs)
except StopIteration:
if r > 1:
factors.append(r)
return factors
def test_factor():
assert factor(100) == [2,2,5,5]
assert factor(23) == [23]
assert factor(871) == [13,67]
assert factor(40) == [2, 2, 2, 5]
assert factor(2*3*5*7*11*13*17*19*23*29*31) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
def collatz_sequence(n):
"""
    The Collatz sequence for n is generated by iterating:
        a_n = a_{n-1} / 2      if a_{n-1} is even
        a_n = 3*a_{n-1} + 1    if a_{n-1} is odd
    ...a sequence which is conjectured to always wind up at 1.
"""
s = []
x = n
while x>1:
s.append(x)
if x % 2 == 0:
x = x//2
else:
x = 3*x + 1
return s
def test_collatz_sequence():
for n in range(1,100):
print(collatz_sequence(n))
print(max(len(collatz_sequence(n)) for n in range(1000)))
def euler_phi(n):
return sum(gcd(n, k) == 1 for k in range(1, n + 1))
def order_g_mod_m(g, m):
for x in range(1, m):
if pow(g, x, m) == 1:
break
return x
def primitive_roots(m):
phi = euler_phi(m)
return [g for g in range(2, m) if order_g_mod_m(g, m) == phi]
def is_primitive_root(a, m):
return order_g_mod_m(a, m) == euler_phi(m)
def primitive_root_permutations(m):
phi = euler_phi(m)
return [
[pow(a, x, m) for x in range(1, m)]
for a in range(2, m)
if order_g_mod_m(a, m) == phi]
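# A small sanity check in the same style as the other test_ helpers above
# (added as an illustrative sketch; the expected values follow directly from
# the definitions of euler_phi and order_g_mod_m).
def test_primitive_roots():
    assert euler_phi(10) == 4
    assert euler_phi(13) == 12
    assert primitive_roots(7) == [3, 5]
    assert is_primitive_root(3, 7) and not is_primitive_root(2, 7)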
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2014 Didotech Inc. (<http://www.didotech.com>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import re
from report import report_sxw
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from datetime import datetime
from core_extended.ordereddict import OrderedDict
from openerp.osv import orm
from openerp.tools.translate import _
class Parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'invoice_tree': self._get_invoice_tree,
'invoice_origin': self._get_invoice_origin,
'italian_number': self._get_italian_number,
'invoice_move_lines': self._get_invoice_move_lines,
'ddt': self._get_ddt,
'set_picking': self._set_picking,
'indirizzo': self._indirizzo,
'div': self._div,
'line_description': self._line_description,
'desc_nocode': self._desc_nocode,
'total_fiscal': self._get_total_fiscal,
'total_tax_fiscal': self._get_total_tax_fiscal,
})
self.cache = {}
def _get_total_tax_fiscal(self, tax_line):
invoice = self.pool['account.invoice'].browse(self.cr, self.uid, self.ids[0])
amount_withholding = 0.0
for line in tax_line:
if line.tax_code_id.notprintable:
amount_withholding += line.tax_amount
if amount_withholding != 0.0:
if invoice.type in ['out_invoice', 'in_invoice']:
return invoice.amount_tax - amount_withholding
else:
return invoice.amount_tax + amount_withholding
return invoice.amount_tax
def _get_total_fiscal(self, tax_line):
invoice = self.pool['account.invoice'].browse(self.cr, self.uid, self.ids[0])
amount_withholding = 0.0
for line in tax_line:
if line.tax_code_id.notprintable:
amount_withholding += line.tax_amount
if amount_withholding != 0.0:
if invoice.type in ['out_invoice', 'in_invoice']:
return invoice.amount_total - amount_withholding
else:
return invoice.amount_total + amount_withholding
return invoice.amount_total
def _desc_nocode(self, string):
        return re.compile(r'\[.*\]\ ').sub('', string)
def _line_description(self, line):
sale_order_line_obj = self.pool['sale.order.line']
stock_picking_obj = self.pool['stock.picking']
description = []
if line.name:
            description.append(re.compile(r'\[.*\]\ ').sub('', line.name))
if line.note:
description.append(line.note)
return '\n'.join(description)
def _div(self, up, down):
res = 0
#import pdb; pdb.set_trace()
if down:
res = up / down
return res
def _set_picking(self, invoice):
self._get_invoice_tree(invoice.invoice_line)
return False
def _get_ddt(self):
def get_picking(picking_name):
picking_ids = self.pool['stock.picking'].search(self.cr, self.uid, [('name', '=', picking_name)])
if picking_ids:
return self.pool['stock.picking'].browse(self.cr, self.uid, picking_ids[0])
invoice = self.pool['account.invoice'].browse(self.cr, self.uid, self.ids[0])
if hasattr(invoice, 'move_products') and invoice.move_products:
return self.pool['account.invoice'].browse(self.cr, self.uid, self.ids[0])
if hasattr(self, 'picking_name'):
return self.cache.get(self.picking_name, False) or self.cache.setdefault(self.picking_name, get_picking(self.picking_name))
return False
def _get_italian_number(self, number, precision=2, no_zero=False):
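        # Format a number Italian-style: '.' as thousands separator and ',' as the
        # decimal mark, e.g. 1234.5 -> '1.234,50' with the default precision of 2.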
if not number and no_zero:
return ''
elif not number:
return '0,00'
if number < 0:
sign = '-'
else:
sign = ''
## Requires Python >= 2.7:
#before, after = "{:.{digits}f}".format(number, digits=precision).split('.')
## Works with Python 2.6:
if precision:
before, after = "{0:10.{digits}f}".format(number, digits=precision).strip('- ').split('.')
else:
before = "{0:10.{digits}f}".format(number, digits=precision).strip('- ').split('.')[0]
after = ''
belist = []
end = len(before)
for i in range(3, len(before) + 3, 3):
start = len(before) - i
if start < 0:
start = 0
belist.append(before[start: end])
end = len(before) - i
before = '.'.join(reversed(belist))
if no_zero and int(number) == float(number) or precision == 0:
return sign + before
else:
return sign + before + ',' + after
def get_reference(self, order_name):
order_obj = self.pool['sale.order']
description = []
if order_name:
order_ids = order_obj.search(self.cr, self.uid, [('name', '=', order_name)])
if len(order_ids) == 1:
order = order_obj.browse(self.cr, self.uid, order_ids[0])
if order.client_order_ref:
order_date = datetime.strptime(order.date_order, DEFAULT_SERVER_DATE_FORMAT)
description.append(u'{client_order} del {order_date}'.format(client_order=order.client_order_ref, order_date=order_date.strftime("%d/%m/%Y")))
return '\n'.join(description)
def get_description(self, ddt_name, order_name):
ddt_obj = self.pool['stock.picking']
order_obj = self.pool['sale.order']
description = []
if ddt_name:
ddt_ids = ddt_obj.search(self.cr, self.uid, [('name', '=', ddt_name)])
if len(ddt_ids) == 1:
ddt = ddt_obj.browse(self.cr, self.uid, ddt_ids[0])
if ddt.ddt_number:
ddt_date = datetime.strptime(ddt.ddt_date, DEFAULT_SERVER_DATE_FORMAT)
## Ex: Rif. Ns. DDT 2012/0335
description.append(u'Rif. Ns. DDT {ddt} del {ddt_date}'.format(ddt=ddt.ddt_number, ddt_date=ddt_date.strftime("%d/%m/%Y")))
if order_name and not self.pool['res.users'].browse(
self.cr, self.uid, self.uid).company_id.disable_sale_ref_invoice_report:
order_ids = order_obj.search(self.cr, self.uid, [('name', '=', order_name)])
if len(order_ids) == 1:
order = order_obj.browse(self.cr, self.uid, order_ids[0])
order_date = datetime.strptime(order.date_order, DEFAULT_SERVER_DATE_FORMAT)
if order.client_order_ref:
description.append(u'Rif. Ns. Ordine {order} del {order_date}, Vs. Ordine {client_order}'.format(order=order.name, order_date=order_date.strftime("%d/%m/%Y"), client_order=order.client_order_ref))
else:
description.append(u'Rif. Ns. Ordine {order} del {order_date}'.format(order=order.name, order_date=order_date.strftime("%d/%m/%Y")))
return ' / '.join(description)
def _get_picking_name(self, line):
picking_obj = self.pool['stock.picking']
picking_ids = picking_obj.search(self.cr, self.uid, [('origin', '=', line.origin)])
if len(picking_ids) == 1:
picking = picking_obj.browse(self.cr, self.uid, picking_ids[0])
return picking.name
elif picking_ids:
move_obj = self.pool['stock.move']
move_ids = move_obj.search(self.cr, self.uid, [('product_id', '=', line.product_id.id), ('origin', '=', line.origin)])
if len(move_ids) == 1:
stock_move = move_obj.browse(self.cr, self.uid, move_ids[0])
if stock_move.picking_id:
return stock_move.picking_id.name
else:
return False
elif move_ids:
# The same product from the same sale_order is present in various picking lists
raise orm.except_orm('Warning', _('Ambiguous line origin'))
else:
return False
else:
return False
def _get_invoice_origin(self, invoice_lines, type, source_data = 'line'):
origins = []
sale_orders = []
stock_pickings = []
picking_obj = self.pool['stock.picking']
sale_order_obj = self.pool['sale.order']
description = []
# group origins
for invoice_line in invoice_lines:
if source_data == 'line':
if invoice_line.origin not in origins:
origins.append(invoice_line.origin)
elif source_data == 'invoice':
for invoice_origin in invoice_line.invoice_id.origin.split(', '):
if invoice_origin not in origins:
origins.append(invoice_origin)
for origin in origins:
if ':' in origin:
split_list = origin.split(':')
ddt, sale_order = split_list[0], split_list[1]
elif origin[:4] == 'OUT/':
ddt = origin
sale_order = False
elif origin[:4] == 'IN/':
ddt = False
sale_order = False
else:
ddt = False
sale_order = origin
if type == 'ddt':
if ddt not in stock_pickings:
stock_pickings.append(ddt)
picking_ids = picking_obj.search(self.cr, self.uid, [('name', '=', ddt)])
if picking_ids:
ddt = picking_obj.browse(self.cr, self.uid, picking_ids[0])
ddt_date = datetime.strptime(ddt.ddt_date, DEFAULT_SERVER_DATE_FORMAT)
description.append(u'Ns. DDT {ddt} del {ddt_date}'.format(ddt=ddt.ddt_number, ddt_date=ddt_date.strftime("%d/%m/%Y")))
else:
if sale_order not in sale_orders:
sale_orders.append(sale_order)
sale_order_ids = sale_order_obj.search(self.cr, self.uid, [('name', '=', sale_order)])
if sale_order_ids:
order = sale_order_obj.browse(self.cr, self.uid, sale_order_ids[0])
# order_date = datetime.strptime(order.date_order, DEFAULT_SERVER_DATE_FORMAT)
description.append(u'Vs. Ordine {client_order}'.format(client_order=order.client_order_ref))
return '\n'.join(description)
def _get_invoice_tree(self, invoice_lines):
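        # Group invoice lines by the DDT (delivery note) or sale order they originate
        # from; each group's key embeds the document date so groups sort chronologically.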
invoice = {}
keys = {}
picking_obj = self.pool['stock.picking']
sale_order_obj = self.pool['sale.order']
no_tree = False
for line in invoice_lines:
if line.origin:
if ':' in line.origin:
split_list = line.origin.split(':')
ddt, sale_order = split_list[0], split_list[1]
if line.invoice_id.direct_invoice:
self.picking_name = ddt
ddt = False
elif line.origin[:4] == 'OUT/':
if line.invoice_id.direct_invoice:
self.picking_name = line.origin
ddt = False
else:
ddt = line.origin
sale_order = False
elif line.origin[:4] == 'IN/':
ddt = False
sale_order = False
elif line.invoice_id.direct_invoice:
print line.origin
ddt = False
sale_order = line.origin
self.picking_name = self._get_picking_name(line)
#if isinstance(self.picking_name, (list, tuple)):
else:
ddt = False
sale_order = line.origin
else:
ddt = False
sale_order = False
# Order lines by date and by ddt, so first create date_ddt key:
if ddt:
if ddt in keys:
key = keys[ddt]
else:
picking_ids = picking_obj.search(self.cr, self.uid, [('name', '=', ddt)])
if picking_ids:
picking = picking_obj.browse(self.cr, self.uid, picking_ids[0])
key = "{0}_{1}".format(picking.ddt_date, ddt)
else:
key = ddt
elif sale_order:
if sale_order in keys:
key = keys[sale_order]
else:
sale_order_ids = sale_order_obj.search(self.cr, self.uid, [('name', '=', sale_order)])
if sale_order_ids:
sale = sale_order_obj.browse(self.cr, self.uid, sale_order_ids[0])
key = "{0}_{1}".format(sale.date_order, sale)
else:
key = sale_order
else:
key = False
if key in invoice:
invoice[key]['lines'].append(line)
else:
description = self.get_description(ddt, sale_order)
customer_ref = self.get_reference(sale_order)
invoice[key] = {'description': description, 'origin': customer_ref, 'lines': [line]}
if no_tree:
description = self._get_invoice_origin(invoice_lines, 'ddt', 'invoice')
invoice[False]['description'] = description
return OrderedDict(sorted(invoice.items(), key=lambda t: t[0])).values()
def _get_invoice_move_lines(self, move_id):
if move_id.line_id:
return [line for line in move_id.line_id if line.date_maturity]
else:
return []
def _indirizzo(self, partner):
address = self.pool['res.partner'].address_get(self.cr, self.uid, [partner.parent_id and partner.parent_id.id or partner.id], ['default', 'invoice'])
return self.pool['res.partner.address'].browse(self.cr, self.uid, address['invoice'] or address['default'])
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveRouteListResult(Model):
"""Response for list effective route API service call.
Variables are only populated by the server, and will be ignored when
sending a request.
:param value: A list of effective routes.
:type value: list[~azure.mgmt.network.v2017_11_01.models.EffectiveRoute]
:ivar next_link: The URL to get the next set of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EffectiveRoute]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, **kwargs):
super(EffectiveRouteListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
|
from Plugins.Plugin import PluginDescriptor
#------------------------------------------------------------------------------------------
def Pic_Thumb(*args, **kwa):
import ui
return ui.Pic_Thumb(*args, **kwa)
def picshow(*args, **kwa):
import ui
return ui.picshow(*args, **kwa)
def main(session, **kwargs):
from ui import picshow
session.open(picshow)
def filescan_open(list, session, **kwargs):
# Recreate List as expected by PicView
filelist = [((file.path, False), None) for file in list]
from ui import Pic_Full_View
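	# Note: 'file' below refers to the last entry left over from the list comprehension
	# above (a Python 2 scoping quirk), so its path is what gets passed to Pic_Full_View.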
session.open(Pic_Full_View, filelist, 0, file.path)
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
import os
	# Overwrite checkFile to only detect local files
class LocalScanner(Scanner):
def checkFile(self, file):
return os.path.exists(file.path)
return \
LocalScanner(mimetypes = ["image/jpeg", "image/png", "image/gif", "image/bmp"],
paths_to_scan =
[
ScanPath(path = "DCIM", with_subdirs = True),
ScanPath(path = "", with_subdirs = False),
],
name = "Pictures",
description = _("View Photos..."),
openfnc = filescan_open,
)
def Plugins(**kwargs):
return \
[PluginDescriptor(name=_("PicturePlayer"), description=_("fileformats (BMP, PNG, JPG, GIF)"), icon="pictureplayer.png", where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=main),
PluginDescriptor(name=_("PicturePlayer"), where = PluginDescriptor.WHERE_FILESCAN, needsRestart = False, fnc = filescan)]
|
from __future__ import print_function
import os
import os.path as op
from shutil import copyfile
import numpy as np
from scipy import sparse
import pytest
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from mne.datasets import testing
from mne import read_surface, write_surface, decimate_surface
from mne.surface import (read_morph_map, _compute_nearest,
fast_cross_3d, get_head_surf, read_curvature,
get_meg_helmet_surf)
from mne.utils import (_TempDir, requires_mayavi, requires_tvtk,
run_tests_if_main, object_diff, traits_test)
from mne.io import read_info
from mne.transforms import _get_trans
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
rng = np.random.RandomState(0)
def test_helmet():
"""Test loading helmet surfaces."""
base_dir = op.join(op.dirname(__file__), '..', 'io')
fname_raw = op.join(base_dir, 'tests', 'data', 'test_raw.fif')
fname_kit_raw = op.join(base_dir, 'kit', 'tests', 'data',
'test_bin_raw.fif')
fname_bti_raw = op.join(base_dir, 'bti', 'tests', 'data',
'exported4D_linux_raw.fif')
fname_ctf_raw = op.join(base_dir, 'tests', 'data', 'test_ctf_raw.fif')
fname_trans = op.join(base_dir, 'tests', 'data',
'sample-audvis-raw-trans.txt')
trans = _get_trans(fname_trans)[0]
for fname in [fname_raw, fname_kit_raw, fname_bti_raw, fname_ctf_raw]:
helmet = get_meg_helmet_surf(read_info(fname), trans)
assert_equal(len(helmet['rr']), 304) # they all have 304 verts
assert_equal(len(helmet['rr']), len(helmet['nn']))
@testing.requires_testing_data
def test_head():
"""Test loading the head surface."""
surf_1 = get_head_surf('sample', subjects_dir=subjects_dir)
surf_2 = get_head_surf('sample', 'head', subjects_dir=subjects_dir)
assert len(surf_1['rr']) < len(surf_2['rr']) # BEM vs dense head
pytest.raises(TypeError, get_head_surf, subject=None,
subjects_dir=subjects_dir)
def test_huge_cross():
"""Test cross product with lots of elements."""
x = rng.rand(100000, 3)
y = rng.rand(1, 3)
z = np.cross(x, y)
zz = fast_cross_3d(x, y)
assert_array_equal(z, zz)
def test_compute_nearest():
"""Test nearest neighbor searches."""
x = rng.randn(500, 3)
x /= np.sqrt(np.sum(x ** 2, axis=1))[:, None]
nn_true = rng.permutation(np.arange(500, dtype=np.int))[:20]
y = x[nn_true]
nn1 = _compute_nearest(x, y, method='BallTree')
nn2 = _compute_nearest(x, y, method='cKDTree')
nn3 = _compute_nearest(x, y, method='cdist')
assert_array_equal(nn_true, nn1)
assert_array_equal(nn_true, nn2)
assert_array_equal(nn_true, nn3)
# test distance support
nnn1 = _compute_nearest(x, y, method='BallTree', return_dists=True)
nnn2 = _compute_nearest(x, y, method='cKDTree', return_dists=True)
nnn3 = _compute_nearest(x, y, method='cdist', return_dists=True)
assert_array_equal(nnn1[0], nn_true)
assert_array_equal(nnn1[1], np.zeros_like(nn1)) # all dists should be 0
assert_equal(len(nnn1), len(nnn2))
for nn1, nn2, nn3 in zip(nnn1, nnn2, nnn3):
assert_array_equal(nn1, nn2)
assert_array_equal(nn1, nn3)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_morph_maps():
"""Test reading and creating morph maps."""
# make a new fake subjects_dir
tempdir = _TempDir()
for subject in ('sample', 'sample_ds', 'fsaverage_ds'):
os.mkdir(op.join(tempdir, subject))
os.mkdir(op.join(tempdir, subject, 'surf'))
regs = ('reg', 'left_right') if subject == 'fsaverage_ds' else ('reg',)
for hemi in ['lh', 'rh']:
for reg in regs:
args = [subject, 'surf', hemi + '.sphere.' + reg]
copyfile(op.join(subjects_dir, *args),
op.join(tempdir, *args))
for subject_from, subject_to, xhemi in (
('fsaverage_ds', 'sample_ds', False),
('fsaverage_ds', 'fsaverage_ds', True)):
# trigger the creation of morph-maps dir and create the map
with pytest.warns(None):
mmap = read_morph_map(subject_from, subject_to, tempdir,
xhemi=xhemi)
mmap2 = read_morph_map(subject_from, subject_to, subjects_dir,
xhemi=xhemi)
assert_equal(len(mmap), len(mmap2))
for m1, m2 in zip(mmap, mmap2):
# deal with sparse matrix stuff
diff = (m1 - m2).data
assert_allclose(diff, np.zeros_like(diff), atol=1e-3, rtol=0)
# This will also trigger creation, but it's trivial
with pytest.warns(None):
mmap = read_morph_map('sample', 'sample', subjects_dir=tempdir)
for mm in mmap:
assert (mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0
@testing.requires_testing_data
def test_io_surface():
"""Test reading and writing of Freesurfer surface mesh files."""
tempdir = _TempDir()
fname_quad = op.join(data_path, 'subjects', 'bert', 'surf',
'lh.inflated.nofix')
fname_tri = op.join(data_path, 'subjects', 'fsaverage', 'surf',
'lh.inflated')
for fname in (fname_quad, fname_tri):
with pytest.warns(None): # no volume info
pts, tri, vol_info = read_surface(fname, read_metadata=True)
write_surface(op.join(tempdir, 'tmp'), pts, tri, volume_info=vol_info)
with pytest.warns(None): # no volume info
c_pts, c_tri, c_vol_info = read_surface(op.join(tempdir, 'tmp'),
read_metadata=True)
assert_array_equal(pts, c_pts)
assert_array_equal(tri, c_tri)
assert_equal(object_diff(vol_info, c_vol_info), '')
@testing.requires_testing_data
def test_read_curv():
"""Test reading curvature data."""
fname_curv = op.join(data_path, 'subjects', 'fsaverage', 'surf', 'lh.curv')
fname_surf = op.join(data_path, 'subjects', 'fsaverage', 'surf',
'lh.inflated')
bin_curv = read_curvature(fname_curv)
rr = read_surface(fname_surf)[0]
assert len(bin_curv) == len(rr)
assert np.logical_or(bin_curv == 0, bin_curv == 1).all()
@requires_tvtk
@requires_mayavi
@traits_test
def test_decimate_surface():
"""Test triangular surface decimation."""
points = np.array([[-0.00686118, -0.10369860, 0.02615170],
[-0.00713948, -0.10370162, 0.02614874],
[-0.00686208, -0.10368247, 0.02588313],
[-0.00713987, -0.10368724, 0.02587745]])
tris = np.array([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, 0]])
for n_tri in [4, 3, 2]: # quadric decimation creates even numbered output.
_, this_tris = decimate_surface(points, tris, n_tri)
assert len(this_tris) == n_tri if not n_tri % 2 else 2
nirvana = 5
tris = np.array([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, nirvana]])
pytest.raises(ValueError, decimate_surface, points, tris, n_tri)
run_tests_if_main()
|
import argparse
import contextlib
import copy
import errno
import os
import shlex
import shutil
import stat
import subprocess
import sys
import tempfile
from distutils.ccompiler import CCompiler
from distutils.dist import Distribution
from types import TracebackType
from typing import Any
from typing import Callable
from typing import Dict
from typing import Generator
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Type
from setuptools import Extension
from setuptools.command.build_ext import build_ext as _build_ext
def rmtree(path: str) -> None:
"""Newer golang uses readonly dirs & files for module cache."""
def handle_remove_readonly(
func: Callable[..., Any],
path: str,
exc: Tuple[Type[OSError], OSError, TracebackType],
) -> None:
excvalue = exc[1]
if (
func in (os.rmdir, os.remove, os.unlink) and
excvalue.errno == errno.EACCES
):
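            # add the owner-write bit to the file and its parent directory, then retry
            # the original call that failed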
for p in (path, os.path.dirname(path)):
os.chmod(p, os.stat(p).st_mode | stat.S_IWUSR)
func(path)
else:
raise
shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
@contextlib.contextmanager
def _tmpdir() -> Generator[str, None, None]:
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
rmtree(tempdir)
def _get_cflags(
compiler: CCompiler,
macros: Sequence[Tuple[str, Optional[str]]],
) -> str:
args = [f'-I{p}' for p in compiler.include_dirs]
for macro_name, macro_value in macros:
if macro_value is None:
args.append(f'-D{macro_name}')
else:
args.append(f'-D{macro_name}={macro_value}')
return ' '.join(args)
LFLAG_CLANG = '-Wl,-undefined,dynamic_lookup'
LFLAG_GCC = '-Wl,--unresolved-symbols=ignore-all'
LFLAGS = (LFLAG_CLANG, LFLAG_GCC)
def _get_ldflags() -> str:
"""Determine the correct link flags. This attempts compiles similar
to how autotools does feature detection.
"""
# windows gcc does not support linking with unresolved symbols
if sys.platform == 'win32': # pragma: no cover (windows)
prefix = getattr(sys, 'real_prefix', sys.prefix)
libs = os.path.join(prefix, 'libs')
return '-L{} -lpython{}{}'.format(libs, *sys.version_info[:2])
cc = subprocess.check_output(('go', 'env', 'CC')).decode('UTF-8').strip()
with _tmpdir() as tmpdir:
testf = os.path.join(tmpdir, 'test.c')
with open(testf, 'w') as f:
f.write('int f(int); int main(void) { return f(0); }\n')
for lflag in LFLAGS: # pragma: no cover (platform specific)
try:
subprocess.check_call((cc, testf, lflag), cwd=tmpdir)
return lflag
except subprocess.CalledProcessError:
pass
else: # pragma: no cover (platform specific)
# wellp, none of them worked, fall back to gcc and they'll get a
# hopefully reasonable error message
return LFLAG_GCC
def _check_call(cmd: Tuple[str, ...], cwd: str, env: Dict[str, str]) -> None:
envparts = [f'{k}={shlex.quote(v)}' for k, v in sorted(tuple(env.items()))]
print(
'$ {}'.format(' '.join(envparts + [shlex.quote(p) for p in cmd])),
file=sys.stderr,
)
subprocess.check_call(cmd, cwd=cwd, env=dict(os.environ, **env))
def _get_build_extension_method(
base: Type[_build_ext],
root: str,
) -> Callable[[_build_ext, Extension], None]:
def build_extension(self: _build_ext, ext: Extension) -> None:
# If there are no .go files then the parent should handle this
if not any(source.endswith('.go') for source in ext.sources):
# the base class may mutate `self.compiler`
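            # so run it against a throwaway deepcopy and swap the original back afterwards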
compiler = copy.deepcopy(self.compiler)
self.compiler, compiler = compiler, self.compiler
try:
return base.build_extension(self, ext)
finally:
self.compiler, compiler = compiler, self.compiler
if len(ext.sources) != 1:
raise OSError(
f'Error building extension `{ext.name}`: '
f'sources must be a single file in the `main` package.\n'
                f'Received: {ext.sources!r}',
)
main_file, = ext.sources
if not os.path.exists(main_file):
raise OSError(
f'Error building extension `{ext.name}`: '
f'{main_file} does not exist',
)
main_dir = os.path.dirname(main_file)
# Copy the package into a temporary GOPATH environment
with _tmpdir() as tempdir:
root_path = os.path.join(tempdir, 'src', root)
# Make everything but the last directory (copytree interface)
os.makedirs(os.path.dirname(root_path))
shutil.copytree('.', root_path, symlinks=True)
pkg_path = os.path.join(root_path, main_dir)
gopath = os.environ.get('SETUPTOOLS_GOLANG_GOPATH', tempdir)
env = {'GOPATH': gopath}
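            # 'go get -d' downloads the package's dependencies into GOPATH without building them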
cmd_get = ('go', 'get', '-d')
_check_call(cmd_get, cwd=pkg_path, env=env)
env.update({
'CGO_CFLAGS': _get_cflags(
self.compiler, ext.define_macros or (),
),
'CGO_LDFLAGS': _get_ldflags(),
})
cmd_build = (
'go', 'build', '-buildmode=c-shared',
'-o', os.path.abspath(self.get_ext_fullpath(ext.name)),
)
_check_call(cmd_build, cwd=pkg_path, env=env)
return build_extension
def _get_build_ext_cls(base: Type[_build_ext], root: str) -> Type[_build_ext]:
attrs = {'build_extension': _get_build_extension_method(base, root)}
return type('build_ext', (base,), attrs)
def set_build_ext(
dist: Distribution,
attr: str,
value: Dict[str, str],
) -> None:
root = value['root']
base = dist.cmdclass.get('build_ext', _build_ext)
dist.cmdclass['build_ext'] = _get_build_ext_cls(base, root)
GOLANG = 'https://storage.googleapis.com/golang/go{}.linux-amd64.tar.gz'
SCRIPT = '''\
cd /tmp
curl {golang} --silent --location | tar -xz
export PATH="/tmp/go/bin:$PATH" HOME=/tmp
for py in {pythons}; do
"/opt/python/$py/bin/pip" wheel --no-deps --wheel-dir /tmp /dist/*.tar.gz
done
ls *.whl | xargs -n1 --verbose auditwheel repair --wheel-dir /dist
ls -al /dist
'''
def build_manylinux_wheels(
argv: Optional[Sequence[str]] = None,
) -> int: # pragma: no cover
parser = argparse.ArgumentParser()
parser.add_argument(
'--golang', default='1.14.8',
help='Override golang version (default %(default)s)',
)
parser.add_argument(
'--pythons', default='cp36-cp36m,cp37-cp37m,cp38-cp38,cp39-cp39',
help='Override pythons to build (default %(default)s)',
)
args = parser.parse_args(argv)
golang = GOLANG.format(args.golang)
pythons = ' '.join(args.pythons.split(','))
assert os.path.exists('setup.py')
if os.path.exists('dist'):
shutil.rmtree('dist')
os.makedirs('dist')
_check_call(('python', 'setup.py', 'sdist'), cwd='.', env={})
_check_call(
(
'docker', 'run', '--rm',
'--volume', f'{os.path.abspath("dist")}:/dist:rw',
'--user', f'{os.getuid()}:{os.getgid()}',
'quay.io/pypa/manylinux1_x86_64:latest',
'bash', '-o', 'pipefail', '-euxc',
SCRIPT.format(golang=golang, pythons=pythons),
),
cwd='.', env={},
)
print('*' * 79)
print('Your wheels have been built into ./dist')
print('*' * 79)
return 0
|
# -*- coding: utf-8 -*-
import unittest
import sys, os.path
import bottle
import urllib2
from StringIO import StringIO
import thread
import time
from tools import ServerTestBase, tob
class TestWsgi(ServerTestBase):
''' Tests for WSGI functionality, routing and output casting (decorators) '''
def test_get(self):
""" WSGI: GET routes"""
@bottle.route('/')
def test(): return 'test'
self.assertStatus(404, '/not/found')
self.assertStatus(405, '/', post="var=value")
self.assertBody('test', '/')
def test_post(self):
""" WSGI: POST routes"""
@bottle.route('/', method='POST')
def test(): return 'test'
self.assertStatus(404, '/not/found')
self.assertStatus(405, '/')
self.assertBody('test', '/', post="var=value")
def test_headget(self):
""" WSGI: HEAD routes and GET fallback"""
@bottle.route('/get')
def test(): return 'test'
@bottle.route('/head', method='HEAD')
def test2(): return 'test'
# GET -> HEAD
self.assertStatus(405, '/head')
# HEAD -> HEAD
self.assertStatus(200, '/head', method='HEAD')
self.assertBody('', '/head', method='HEAD')
# HEAD -> GET
self.assertStatus(200, '/get', method='HEAD')
self.assertBody('', '/get', method='HEAD')
def get304(self):
""" 304 responses must not return entity headers """
bad = ('allow', 'content-encoding', 'content-language',
'content-length', 'content-md5', 'content-range',
'content-type', 'last-modified') # + c-location, expires?
for h in bad:
bottle.response.set_header(h, 'foo')
bottle.status = 304
for h, v in bottle.response.headerlist:
self.assertFalse(h.lower() in bad, "Header %s not deleted" % h)
def test_anymethod(self):
self.assertStatus(404, '/any')
@bottle.route('/any', method='ANY')
def test2(): return 'test'
self.assertStatus(200, '/any', method='HEAD')
self.assertBody('test', '/any', method='GET')
self.assertBody('test', '/any', method='POST')
self.assertBody('test', '/any', method='DELETE')
@bottle.route('/any', method='GET')
def test2(): return 'test2'
self.assertBody('test2', '/any', method='GET')
@bottle.route('/any', method='POST')
def test2(): return 'test3'
self.assertBody('test3', '/any', method='POST')
self.assertBody('test', '/any', method='DELETE')
def test_500(self):
""" WSGI: Exceptions within handler code (HTTP 500) """
@bottle.route('/')
def test(): return 1/0
self.assertStatus(500, '/')
def test_503(self):
""" WSGI: Server stopped (HTTP 503) """
@bottle.route('/')
def test(): return 'bla'
self.assertStatus(200, '/')
bottle.app().serve = False
self.assertStatus(503, '/')
def test_401(self):
""" WSGI: abort(401, '') (HTTP 401) """
@bottle.route('/')
def test(): bottle.abort(401)
self.assertStatus(401,'/')
@bottle.error(401)
def err(e):
bottle.response.status = 200
return str(type(e))
self.assertStatus(200,'/')
self.assertBody("<class 'bottle.HTTPError'>",'/')
def test_303(self):
""" WSGI: redirect (HTTP 303) """
@bottle.route('/')
def test(): bottle.redirect('/yes')
self.assertStatus(303, '/')
self.assertHeader('Location', 'http://127.0.0.1/yes', '/')
def test_generator_callback(self):
@bottle.route('/yield')
def test():
bottle.response.headers['Test-Header'] = 'test'
yield 'foo'
@bottle.route('/yield_nothing')
def test2():
yield
bottle.response.headers['Test-Header'] = 'test'
self.assertBody('foo', '/yield')
self.assertHeader('Test-Header', 'test', '/yield')
self.assertBody('', '/yield_nothing')
self.assertHeader('Test-Header', 'test', '/yield_nothing')
def test_cookie(self):
""" WSGI: Cookies """
@bottle.route('/cookie')
def test():
bottle.response.COOKIES['a']="a"
bottle.response.set_cookie('b', 'b')
bottle.response.set_cookie('c', 'c', path='/')
return 'hello'
try:
c = self.urlopen('/cookie')['header'].get_all('Set-Cookie', '')
except:
c = self.urlopen('/cookie')['header'].get('Set-Cookie', '').split(',')
c = [x.strip() for x in c]
self.assertTrue('a=a' in c)
self.assertTrue('b=b' in c)
self.assertTrue('c=c; Path=/' in c)
class TestDecorators(ServerTestBase):
''' Tests Decorators '''
def test_view(self):
""" WSGI: Test view-decorator (should override autojson) """
@bottle.route('/tpl')
@bottle.view('stpl_t2main')
def test():
return dict(content='1234')
result = '+base+\n+main+\n!1234!\n+include+\n-main-\n+include+\n-base-\n'
self.assertHeader('Content-Type', 'text/html; charset=UTF-8', '/tpl')
self.assertBody(result, '/tpl')
def test_view_error(self):
""" WSGI: Test if view-decorator reacts on non-dict return values correctly."""
@bottle.route('/tpl')
@bottle.view('stpl_t2main')
def test():
return bottle.HTTPError(401, 'The cake is a lie!')
self.assertInBody('The cake is a lie!', '/tpl')
self.assertInBody('401: Unauthorized', '/tpl')
self.assertStatus(401, '/tpl')
def test_validate(self):
""" WSGI: Test validate-decorator"""
@bottle.route('/:var')
@bottle.route('/')
@bottle.validate(var=int)
def test(var): return 'x' * var
self.assertStatus(403,'/noint')
self.assertStatus(403,'/')
self.assertStatus(200,'/5')
self.assertBody('xxx', '/3')
def test_truncate_body(self):
""" WSGI: Some HTTP status codes must not be used with a response-body """
@bottle.route('/test/:code')
def test(code):
bottle.response.status = int(code)
return 'Some body content'
self.assertBody('Some body content', '/test/200')
self.assertBody('', '/test/100')
self.assertBody('', '/test/101')
self.assertBody('', '/test/204')
self.assertBody('', '/test/304')
def test_routebuild(self):
""" WSGI: Test route builder """
bottle.route('/a/:b/c', name='named')(5)
bottle.request.environ['SCRIPT_NAME'] = ''
self.assertEqual('/a/xxx/c', bottle.url('named', b='xxx'))
self.assertEqual('/a/xxx/c', bottle.app().get_url('named', b='xxx'))
bottle.request.environ['SCRIPT_NAME'] = '/app'
self.assertEqual('/app/a/xxx/c', bottle.url('named', b='xxx'))
bottle.request.environ['SCRIPT_NAME'] = '/app/'
self.assertEqual('/app/a/xxx/c', bottle.url('named', b='xxx'))
bottle.request.environ['SCRIPT_NAME'] = 'app/'
self.assertEqual('/app/a/xxx/c', bottle.url('named', b='xxx'))
def test_decorators(self):
app = bottle.Bottle()
app.route('/g')('foo')
bottle.route('/g')('foo')
app.route('/g2', method='GET')('foo')
bottle.get('/g2')('foo')
app.route('/p', method='POST')('foo')
bottle.post('/p')('foo')
app.route('/p2', method='PUT')('foo')
bottle.put('/p2')('foo')
app.route('/d', method='DELETE')('foo')
bottle.delete('/d')('foo')
self.assertEqual(app.routes, bottle.app().routes)
def test_autoroute(self):
app = bottle.Bottle()
def a(): pass
def b(x): pass
def c(x, y): pass
def d(x, y=5): pass
def e(x=5, y=6): pass
self.assertEqual(['a'],list(bottle.yieldroutes(a)))
self.assertEqual(['b/:x'],list(bottle.yieldroutes(b)))
self.assertEqual(['c/:x/:y'],list(bottle.yieldroutes(c)))
self.assertEqual(['d/:x','d/:x/:y'],list(bottle.yieldroutes(d)))
self.assertEqual(['e','e/:x','e/:x/:y'],list(bottle.yieldroutes(e)))
class TestAppMounting(ServerTestBase):
def setUp(self):
ServerTestBase.setUp(self)
self.subapp = bottle.Bottle()
def test_basicmounting(self):
bottle.app().mount(self.subapp, '/test')
self.assertStatus(404, '/')
self.assertStatus(404, '/test')
self.assertStatus(404, '/test/')
self.assertStatus(404, '/test/test/bar')
@self.subapp.route('/')
@self.subapp.route('/test/:test')
def test(test='foo'):
return test
self.assertStatus(404, '/')
self.assertStatus(404, '/test')
self.assertStatus(200, '/test/')
self.assertBody('foo', '/test/')
self.assertStatus(200, '/test/test/bar')
self.assertBody('bar', '/test/test/bar')
if __name__ == '__main__':
unittest.main()
|
"""This file is used for pip install"""
from setuptools import setup, find_packages
from proj_info import SHORT_NAME, VERSION, ABOUT_URL, AUTHOR, LICENSE_TYPE, \
SHORT_DESCRIPTION, LONG_DESCRIPTION
setup(
name=SHORT_NAME,
version=VERSION,
url=ABOUT_URL,
license=LICENSE_TYPE,
author=AUTHOR,
description=SHORT_DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages('.'),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'flask',
'numpy',
'scikit-learn',
'scipy',
'pandas',
'gensim',
'beautifulsoup4',
'lxml',
'matplotlib',
'chardet',
'natsort',
'plotly'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Education :: Text Processing :: Utilities'
],
entry_points='''
[console_scripts]
lexos=lexos.application:run
'''
)
|
"""
KBPO internal models.
"""
import os
from django.core.files import File
from django.db import models
from django.conf import settings
from django.contrib.postgres.fields import ArrayField, JSONField
from django.contrib.postgres.search import SearchVector
from kbpo import api
from .fields import SpanField, ScoreField
CORPUS_NAMES = {
"kbp2014": "TAC-KBP 2014 corpus",
"kbp2015": "TAC-KBP 2015 corpus",
"kbp2016": "TAC-KBP 2016 corpus",
}
## Corpus
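# Note: every model below sets managed = False, so Django only maps onto the existing
# kbpo tables (named via db_table) and never creates or migrates them itself.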
class CorpusState(models.Model):
corpus_tag = models.TextField()
state = models.TextField()
class Meta:
managed = False
db_table = 'corpus_state'
class Document(models.Model):
id = models.TextField(primary_key=True)
updated = models.DateTimeField(auto_now=True)
title = models.TextField(blank=True, null=True)
doc_date = models.DateField(blank=True, null=True)
doc_length = models.IntegerField(blank=True, null=True)
doc_digest = models.TextField(blank=True, null=True)
gloss = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'document'
class DocumentTag(models.Model):
doc = models.OneToOneField(Document, primary_key=True)
tag = models.TextField()
class Meta:
managed = False
db_table = 'document_tag'
unique_together = (('doc', 'tag'),)
class DocumentIndex(models.Model):
doc_id = models.TextField(blank=True, null=True)
tsvector = SearchVector(blank=True, null=True) # This field type is a guess.
class Meta:
managed = False
db_table = 'document_index'
class Sentence(models.Model):
"""
Documents are broken up into sentences, which can be useful when displaying to turkers.
"""
id = models.AutoField(primary_key=True)
updated = models.DateTimeField(auto_now=True)
doc = models.ForeignKey(Document, models.DO_NOTHING)
span = SpanField()
sentence_index = models.SmallIntegerField()
gloss = models.TextField()
token_spans = ArrayField(SpanField())
words = ArrayField(models.TextField())
lemmas = ArrayField(models.TextField())
pos_tags = ArrayField(models.TextField())
ner_tags = ArrayField(models.TextField())
dependencies = models.TextField()
class Meta:
managed = False
db_table = 'sentence'
unique_together = (('doc', 'span'),)
class SuggestedMention(models.Model):
doc = models.ForeignKey(Document, models.DO_NOTHING)
span = SpanField(primary_key=True)
updated = models.DateTimeField(auto_now=True)
sentence_id = models.ForeignKey(Sentence)
mention_type = models.TextField()
canonical_span = SpanField()
gloss = models.TextField()
class Meta:
managed = False
db_table = 'suggested_mention'
unique_together = (('doc', 'span'),)
class SuggestedLink(models.Model):
doc = models.ForeignKey('SuggestedMention', models.DO_NOTHING)
span = SpanField(primary_key=True)
updated = models.DateTimeField(auto_now=True)
link_name = models.TextField()
confidence = models.FloatField(default=1.0)
class Meta:
managed = False
db_table = 'suggested_link'
unique_together = (('doc', 'span'),)
## Submission
#- modified -v
class Submission(models.Model):
id = models.AutoField(primary_key=True)
updated = models.DateTimeField(auto_now=True)
name = models.TextField(help_text='A short descriptive name for your system, which will be displayed on the leaderboard.')
corpus_tag = models.TextField(verbose_name='Document corpus')
details = models.TextField(help_text='A short description for your system.')
active = models.BooleanField(default=True)
objects = models.Manager()
def __str__(self):
return self.name
def __repr__(self):
return "<Submission: {}>".format(self.name)
class Meta:
managed = False
db_table = 'submission'
@property
def corpus_name(self):
return CORPUS_NAMES.get(self.corpus_tag, self.corpus_tag)
@property
def log_file(self):
if os.path.exists(self.log_filename):
return File(open(self.log_filename))
else:
return None
@property
def log_filename(self):
"""
        Path of this submission's log file on the server.
"""
return os.path.join(settings.MEDIA_ROOT, 'submissions', '{}.m.log.gz'.format(self.id))
@property
def uploaded_file(self):
if os.path.exists(self.uploaded_filename):
return File(open(self.uploaded_filename))
else:
return None
@property
def uploaded_filename(self):
"""
        Path of the uploaded submission file on the server.
"""
return os.path.join(settings.MEDIA_ROOT, 'submissions', '{}.m.gz'.format(self.id))
@property
def original_file(self):
if os.path.exists(self.original_filename):
return File(open(self.original_filename))
else:
return None
@property
def original_filename(self):
"""
        Path of the originally uploaded file on the server.
"""
return os.path.join(settings.MEDIA_ROOT, 'submissions', '{}.original.gz'.format(self.id))
class SubmissionMention(models.Model):
submission = models.ForeignKey(Submission, models.DO_NOTHING)
doc = models.ForeignKey(Document, models.DO_NOTHING)
span = SpanField(primary_key=True)
updated = models.DateTimeField(auto_now=True)
canonical_span = SpanField()
mention_type = models.TextField()
gloss = models.TextField()
def __str__(self):
return self.gloss
def __repr__(self):
return "<Mention: {} @ {}>".format(self.gloss, self.mention_id)
class Meta:
managed = False
db_table = 'submission_mention'
unique_together = (('submission', 'doc', 'span'),)
class SubmissionLink(models.Model):
submission = models.ForeignKey(Submission, models.DO_NOTHING)
doc = models.ForeignKey(Document, models.DO_NOTHING)
span = SpanField()
updated = models.DateTimeField(auto_now=True)
link_name = models.TextField()
confidence = models.FloatField()
def __str__(self):
return self.link_name
def __repr__(self):
return "<Link: {} @ {}>".format(self.link_name, self.mention_id)
class Meta:
managed = False
db_table = 'submission_link'
unique_together = (('submission', 'doc', 'span'),)
class SubmissionRelation(models.Model):
submission = models.ForeignKey(Submission, models.DO_NOTHING)
doc = models.ForeignKey(Document, models.DO_NOTHING)
subject = SpanField(primary_key=True)
object = SpanField(primary_key=True)
updated = models.DateTimeField(auto_now=True)
relation = models.TextField()
provenances = ArrayField(SpanField())
confidence = models.FloatField()
def __str__(self):
return "{} {} {}".format(self.subject_id, self.relation, self.object_id)
def __repr__(self):
return "<Relation: {} {} {}>".format(self.subject_id, self.relation, self.object_id)
class Meta:
managed = False
db_table = 'submission_relation'
unique_together = (('submission', 'doc', 'subject', 'object'),)
class SubmissionScore(models.Model):
submission = models.OneToOneField(Submission, models.DO_NOTHING)
updated = models.DateTimeField(auto_now=True)
score_type = models.TextField()
score = ScoreField()
left_interval = ScoreField()
right_interval = ScoreField()
class Meta:
managed = False
db_table = 'submission_score'
# == Evaluation batch and question
class EvaluationBatch(models.Model):
id = models.IntegerField(primary_key=True)
created = models.DateTimeField(auto_now=True)
batch_type = models.TextField(choices=[
('exhaustive_entities', 'Exhaustive entities'),
('exhaustive_relations', 'Exhaustive relations'),
('selective_relations', 'Selective relations'),
])
corpus_tag = models.TextField()
description = models.TextField()
class Meta:
managed = False
db_table = 'evaluation_batch'
@property
def status(self):
r"""
Checks the status of an evaluation batch, which is simply the
state of all its children
"""
return api.get_evaluation_batch_status(self.id)
def __repr__(self):
return "<EvaluationBatch: {} on {}>".format(self.batch_type, self.corpus_tag)
def __str__(self):
return "EvaluationBatch {}".format(self.created)
class EvaluationQuestion(models.Model):
CHOICES = [
('pending-turking', 'Uploading to Amazon Mechanical Turk'),
('pending-annotation', 'Crowdsourcing'),
('pending-verification', 'Verifying annotations'),
        ('pending-aggregation', 'Aggregating annotations'), # Note, we might combine the above two steps.
('done', 'Done'),
('revoked', 'Revoked'),
('error', 'Error'),
]
id = models.TextField(primary_key=True)
batch = models.ForeignKey(EvaluationBatch, models.DO_NOTHING, related_name='questions')
created = models.DateTimeField(auto_now=True)
params = models.TextField()
state = models.TextField(choices=CHOICES)
message = models.TextField(null=True)
class Meta:
managed = False
db_table = 'evaluation_question'
unique_together = (('batch', 'id'),)
class MturkBatch(models.Model):
created = models.DateTimeField(auto_now=True)
params = JSONField()
description = models.TextField(blank=True, null=True)
def __repr__(self):
return "<MTurkBatch {}>".format(self.id)
def __str__(self):
return "MTurkBatch {}".format(self.id)
class Meta:
managed = False
db_table = 'mturk_batch'
class MturkHit(models.Model):
CHOICES = [
('pending-annotation', 'Crowdsourcing'),
('pending-aggregation', 'Aggregating'),
('done', 'Done'),
('revoked', 'Revoked'),
('error', 'Error'),
]
id = models.TextField(primary_key=True)
batch = models.ForeignKey(MturkBatch, models.DO_NOTHING)
question_batch = models.ForeignKey(EvaluationBatch, models.DO_NOTHING)
question = models.ForeignKey(EvaluationQuestion, models.DO_NOTHING)
created = models.DateTimeField(auto_now=True)
type_id = models.TextField(blank=True, null=True)
price = models.FloatField(blank=True, null=True)
units = models.IntegerField(blank=True, null=True)
max_assignments = models.IntegerField(blank=True, null=True)
state = models.TextField(blank=True, null=True)
message = models.TextField(blank=True, null=True)
def __repr__(self):
return "<MTurkHIT {}>".format(self.id)
def __str__(self):
return "MTurkHIT {}".format(self.id)
class Meta:
managed = False
db_table = 'mturk_hit'
unique_together = (('batch', 'id'),)
class MturkAssignment(models.Model):
CHOICES = [
('pending-extraction', 'Extracting'),
('pending-validation', 'Validating'),
('pending-payment', 'Paying'),
('pending-rejection-verification', 'Verifying Rejection'),
('verified-rejection', 'Rejecting'),
('approved', 'Approved'),
('rejected', 'Rejected'),
('error', 'Error'),
]
id = models.TextField(primary_key=True)
batch = models.ForeignKey(MturkBatch, models.DO_NOTHING)
hit = models.ForeignKey(MturkHit, models.DO_NOTHING)
created = models.DateTimeField(auto_now=True)
worker_id = models.TextField()
worker_time = models.IntegerField()
response = JSONField()
ignored = models.BooleanField()
verified = models.NullBooleanField()
comments = models.TextField(blank=True, null=True)
state = models.TextField(choices=CHOICES)
message = models.TextField()
def __repr__(self):
return "<MTurkAssignment {}>".format(self.id)
def __str__(self):
return "MTurkAssignment {}".format(self.id)
class Meta:
managed = False
db_table = 'mturk_assignment'
# == Response tables
class EvaluationLink(models.Model):
doc = models.ForeignKey(Document, models.DO_NOTHING, primary_key=True)
span = SpanField()
created = models.DateTimeField(auto_now=True)
question_batch = models.ForeignKey('EvaluationQuestion', models.DO_NOTHING)
question_id = models.TextField() # Not linking to question because question needs (question_id, batch_id)
link_name = models.TextField()
weight = models.FloatField(blank=True, null=True)
class Meta:
managed = False
db_table = 'evaluation_link'
unique_together = (('doc', 'span'),)
class EvaluationLinkResponse(models.Model):
assignment = models.ForeignKey('MturkAssignment', models.DO_NOTHING)
doc = models.ForeignKey(Document, models.DO_NOTHING, primary_key=True)
span = SpanField()
created = models.DateTimeField(auto_now=True)
question_batch = models.ForeignKey('EvaluationBatch', models.DO_NOTHING)
question_id = models.TextField()
link_name = models.TextField()
weight = models.FloatField(blank=True, null=True)
class Meta:
managed = False
db_table = 'evaluation_link_response'
unique_together = (('assignment', 'doc', 'span'),)
class EvaluationMention(models.Model):
doc = models.ForeignKey(Document, models.DO_NOTHING, primary_key=True)
span = SpanField()
created = models.DateTimeField(auto_now=True)
question_id = models.TextField(blank=True, null=True)
question_batch_id = models.IntegerField(blank=True, null=True)
canonical_span = SpanField()
mention_type = models.TextField()
gloss = models.TextField(blank=True, null=True)
weight = models.FloatField()
class Meta:
managed = False
db_table = 'evaluation_mention'
unique_together = (('doc', 'span'),)
class EvaluationMentionResponse(models.Model):
assignment = models.ForeignKey('MturkAssignment', models.DO_NOTHING)
doc = models.ForeignKey(Document, models.DO_NOTHING, primary_key=True)
span = SpanField()
created = models.DateTimeField(auto_now=True)
question_batch = models.ForeignKey('EvaluationBatch', models.DO_NOTHING)
question_id = models.TextField()
canonical_span = SpanField()
mention_type = models.TextField()
gloss = models.TextField(blank=True, null=True)
weight = models.FloatField()
class Meta:
managed = False
db_table = 'evaluation_mention_response'
unique_together = (('assignment', 'doc', 'span'),)
class EvaluationRelation(models.Model):
doc = models.ForeignKey(Document, models.DO_NOTHING)
subject = SpanField()
object = SpanField()
created = models.DateTimeField(auto_now=True)
question_batch_id = models.IntegerField()
question_id = models.TextField(primary_key=True)
relation = models.TextField()
weight = models.FloatField(blank=True, null=True)
class Meta:
managed = False
db_table = 'evaluation_relation'
unique_together = (('doc', 'subject', 'object'),)
class EvaluationRelationResponse(models.Model):
assignment = models.ForeignKey('MturkAssignment', models.DO_NOTHING)
doc = models.ForeignKey(Document, models.DO_NOTHING, primary_key=True)
subject = SpanField()
object = SpanField()
created = models.DateTimeField(auto_now=True)
question_batch = models.ForeignKey(EvaluationBatch, models.DO_NOTHING)
question_id = models.TextField()
relation = models.TextField()
weight = models.FloatField(blank=True, null=True)
class Meta:
managed = False
db_table = 'evaluation_relation_response'
unique_together = (('assignment', 'object', 'doc', 'subject'),)
## Sampling
class SampleBatch(models.Model):
created = models.DateTimeField(auto_now=True)
submission = models.ForeignKey('Submission', models.DO_NOTHING, blank=True, null=True, related_name="sample_batches")
distribution_type = models.TextField()
corpus_tag = models.TextField()
params = JSONField()
class Meta:
managed = False
db_table = 'sample_batch'
class DocumentSample(models.Model):
batch = models.ForeignKey('SampleBatch', models.DO_NOTHING)
doc = models.ForeignKey(Document, models.DO_NOTHING, primary_key=True)
created = models.DateTimeField(auto_now=True)
class Meta:
managed = False
db_table = 'document_sample'
unique_together = (('doc', 'batch'),)
class SubmissionSample(models.Model):
batch = models.ForeignKey(SampleBatch, models.DO_NOTHING)
submission = models.ForeignKey(Submission, models.DO_NOTHING, primary_key=True, related_name='samples')
doc = models.ForeignKey(Document, models.DO_NOTHING)
subject = SpanField()
object = SpanField()
created = models.DateTimeField(auto_now=True)
class Meta:
managed = False
db_table = 'submission_sample'
unique_together = (('submission', 'doc', 'subject', 'object', 'batch'),)
|
# This is an example test settings file for use with the Django test suite.
#
# The 'sqlite3' backend requires only the ENGINE setting (an in-
# memory database will be used). All other backends will require a
# NAME and potentially authentication information. See the
# following section in the docs for more information:
#
# https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/unit-tests/
#
# The different databases that Django supports behave differently in certain
# situations, so it is recommended to run the test suite against as many
# database backends as possible. You may want to create a separate settings
# file for each of the backends you test against.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3'
},
}
SECRET_KEY = "django_tests_secret_key"
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = ()
CACHES = {
'default': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': [
'127.0.0.1:6379:1',
'127.0.0.1:6379:1',
],
'OPTIONS': {
'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
}
},
'doesnotexist': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '127.0.0.1:56379:1',
'OPTIONS': {
'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
}
},
'sample': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '127.0.0.1:6379:1,127.0.0.1:6379:1',
'OPTIONS': {
'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
}
},
}
INSTALLED_APPS = (
'redis_backend_testapp',
'hashring_test',
)
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Giuseppe Natale, Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "[email protected]"
import pytest
from pynq import Overlay
from pynq.iop import PMODA
from pynq.iop import PMODB
from pynq.iop import Pmod_OLED
from pynq.tests.util import user_answer_yes
from pynq.tests.util import get_pmod_id
flag = user_answer_yes("\nPmod OLED attached to the board?")
if flag:
global oled_id
pmod_id = get_pmod_id('Pmod OLED')
if pmod_id == 'A':
oled_id = PMODA
elif pmod_id == 'B':
oled_id = PMODB
else:
raise ValueError("Please type in A or B.")
@pytest.mark.run(order=25)
@pytest.mark.skipif(not flag, reason="need OLED attached in order to run")
def test_write_string():
"""Test for the OLED Pmod.
Writes on the OLED the string 'Welcome to PYNQ.' and asks the user to
confirm if it is shown on the OLED. After that, it clears the screen.
This test can be skipped.
"""
global oled
oled = Pmod_OLED(oled_id)
oled.draw_line(0,0,255,0)
oled.draw_line(0,2,255,2)
oled.write('Welcome to PYNQ.',0,1)
oled.draw_line(0,20,255,20)
oled.draw_line(0,22,255,22)
assert user_answer_yes("\nWelcome message shown on the OLED?")
oled.clear()
assert user_answer_yes("OLED screen clear now?")
del oled
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ReprojectLayer.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QgsCoordinateReferenceSystem, QgsCoordinateTransform, QgsFeature
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterCrs
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class ReprojectLayer(GeoAlgorithm):
INPUT = 'INPUT'
TARGET_CRS = 'TARGET_CRS'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Reproject layer')
self.group, self.i18n_group = self.trAlgorithm('Vector general tools')
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterCrs(self.TARGET_CRS,
self.tr('Target CRS'), 'EPSG:4326'))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Reprojected')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
crsId = self.getParameterValue(self.TARGET_CRS)
targetCrs = QgsCoordinateReferenceSystem()
targetCrs.createFromUserInput(crsId)
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
layer.pendingFields().toList(), layer.wkbType(), targetCrs)
layerCrs = layer.crs()
crsTransform = QgsCoordinateTransform(layerCrs, targetCrs)
outFeat = QgsFeature()
features = vector.features(layer)
total = 100.0 / len(features)
for current, f in enumerate(features):
geom = f.geometry()
geom.transform(crsTransform)
outFeat.setGeometry(geom)
outFeat.setAttributes(f.attributes())
writer.addFeature(outFeat)
progress.setPercentage(int(current * total))
del writer
self.crs = targetCrs
|
#! /usr/bin/python
import requests
import json
import time
import random
from config import EDMUNDS_KEY, EDMNUDS_SECRET
MAX_ATTEMPTS = 6
SLEEP_BASE = 4
SLEEP_RAND = 3
# Increase the sleep time as fewer attempts remain
def smart_sleep(attempts_left, max_attempts):
sleep_seconds = SLEEP_BASE + random.randint(0, SLEEP_RAND * (1 + MAX_ATTEMPTS - attempts_left))
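    # e.g. with all 6 attempts remaining this sleeps 4-7 seconds; with 1 attempt left
    # the sleep can reach 4 + 3 * 6 = 22 seconds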
time.sleep(sleep_seconds)
def get_styles(make, model, year, attempts = MAX_ATTEMPTS):
url = 'https://api.edmunds.com/api/vehicle/v2/{0}/{1}/{2}?fmt=json&api_key={3}'.format(make, model, year, EDMUNDS_KEY)
r = requests.get(url)
if r.status_code == 403 and attempts > 0: # Retry
smart_sleep(attempts, MAX_ATTEMPTS)
return get_styles(make, model, year, attempts - 1)
if r.status_code != 200:
print 'Status', r.status_code, r.content
return {}
else:
return json.loads(r.content)
def get_price(style_id, attempts = MAX_ATTEMPTS):
url = 'https://api.edmunds.com/v1/api/tmv/tmvservice/calculateusedtmv?styleid={0}&condition=Outstanding&mileage=25000&zip=07302&fmt=json&api_key={1}'.format(style_id, EDMUNDS_KEY)
r = requests.get(url)
if r.status_code == 403 and attempts > 0: # Retry
smart_sleep(attempts, MAX_ATTEMPTS)
return get_price(style_id, attempts - 1)
if r.status_code != 200:
print 'Status', r.status_code, r.content
return {}
else:
return json.loads(r.content)
def get_average_price(make, model, year):
try:
styles = get_styles(make, model, year)
# print json.dumps(styles, indent=2)
prices = []
for style in styles['styles']:
style_id = style['id']
            # Pick an arbitrary one for now
price_info = get_price(style_id)
try:
price = price_info['tmv']['totalWithOptions']['usedPrivateParty']
except Exception, e:
print 'Error',e,price_info
price = None
if price and price > 0: # Skip bad records
prices.append(price)
if len(prices) > 0:
return sum(prices)/(1.0 * len(prices))
else:
return None
except Exception, e:
print 'Failed to get price for {0}, {1}, {2}'.format(make, model, year)
return None
|
# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import unittest
import StringIO
import tempfile
from os.path import join, dirname, isfile
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
from pygments.util import uni_open
import support
TESTFILE, TESTDIR = support.location(__file__)
fp = uni_open(TESTFILE, encoding='utf-8')
try:
tokensource = list(PythonLexer().get_tokens(fp.read()))
finally:
fp.close()
class HtmlFormatterTest(unittest.TestCase):
def test_correct_output(self):
hfmt = HtmlFormatter(nowrap=True)
houtfile = StringIO.StringIO()
hfmt.format(tokensource, houtfile)
nfmt = NullFormatter()
noutfile = StringIO.StringIO()
nfmt.format(tokensource, noutfile)
stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
escaped_text = escape_html(noutfile.getvalue())
self.assertEqual(stripped_html, escaped_text)
def test_external_css(self):
# test correct behavior
# CSS should be in /tmp directory
fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8')
# CSS should be in TESTDIR (TESTDIR is absolute)
fmt2 = HtmlFormatter(full=True, cssfile=join(TESTDIR, 'fmt2.css'),
outencoding='utf-8')
tfile = tempfile.NamedTemporaryFile(suffix='.html')
fmt1.format(tokensource, tfile)
try:
fmt2.format(tokensource, tfile)
self.assertTrue(isfile(join(TESTDIR, 'fmt2.css')))
except IOError:
# test directory not writable
pass
tfile.close()
self.assertTrue(isfile(join(dirname(tfile.name), 'fmt1.css')))
os.unlink(join(dirname(tfile.name), 'fmt1.css'))
try:
os.unlink(join(TESTDIR, 'fmt2.css'))
except OSError:
pass
def test_all_options(self):
for optdict in [dict(nowrap=True),
dict(linenos=True),
dict(linenos=True, full=True),
dict(linenos=True, full=True, noclasses=True)]:
outfile = StringIO.StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
def test_linenos(self):
optdict = dict(linenos=True)
outfile = StringIO.StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
self.assertTrue(re.search("<pre>\s+1\s+2\s+3", html))
def test_linenos_with_startnum(self):
optdict = dict(linenos=True, linenostart=5)
outfile = StringIO.StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
self.assertTrue(re.search("<pre>\s+5\s+6\s+7", html))
def test_lineanchors(self):
optdict = dict(lineanchors="foo")
outfile = StringIO.StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
self.assertTrue(re.search("<pre><a name=\"foo-1\">", html))
def test_lineanchors_with_startnum(self):
optdict = dict(lineanchors="foo", linenostart=5)
outfile = StringIO.StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
self.assertTrue(re.search("<pre><a name=\"foo-5\">", html))
def test_valid_output(self):
# test all available wrappers
fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
outencoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
tfile = os.fdopen(handle, 'w+b')
fmt.format(tokensource, tfile)
tfile.close()
catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
try:
import subprocess
po = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
stdout=subprocess.PIPE)
ret = po.wait()
output = po.stdout.read()
po.stdout.close()
except OSError:
# nsgmls not available
pass
else:
if ret:
print output
self.assertFalse(ret, 'nsgmls run reported errors')
os.unlink(pathname)
def test_get_style_defs(self):
fmt = HtmlFormatter()
sd = fmt.get_style_defs()
self.assertTrue(sd.startswith('.'))
fmt = HtmlFormatter(cssclass='foo')
sd = fmt.get_style_defs()
self.assertTrue(sd.startswith('.foo'))
sd = fmt.get_style_defs('.bar')
self.assertTrue(sd.startswith('.bar'))
sd = fmt.get_style_defs(['.bar', '.baz'])
fl = sd.splitlines()[0]
self.assertTrue('.bar' in fl and '.baz' in fl)
def test_unicode_options(self):
fmt = HtmlFormatter(title=u'Föö',
cssclass=u'bär',
cssstyles=u'div:before { content: \'bäz\' }',
encoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
tfile = os.fdopen(handle, 'w+b')
fmt.format(tokensource, tfile)
tfile.close()
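# A minimal sketch for running this module's tests directly; assumes it is
# executed from the Pygments test directory so that the local "support"
# module (imported above) can be found.
if __name__ == '__main__':
    unittest.main()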
|
import array
import struct
import winreg
from settings import search, zones
def find_dat(dat_id):
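    # Lookup sketch, as implied by the code below: VTABLE<i>.DAT stores one
    # byte per DAT id indicating which ROM folder (1..9) holds the file, and
    # FTABLE<i>.DAT stores a packed uint16 path (directory number in the high
    # bits, file number in the low 7 bits) used to build the final .DAT path.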
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\PlayOnlineUS\\InstallFolder')
ffxi_path = winreg.QueryValueEx(key, '0001')[0]
key.Close()
for i in range(1, 10):
vtable = None
if i == 1:
vtable = open('{}VTABLE.DAT'.format(ffxi_path), 'rb')
else:
vtable = open('{}ROM{}\\VTABLE{}.DAT'.format(ffxi_path, i, i), 'rb')
vtable.seek(dat_id)
temp = vtable.read(1)[0]
vtable.close()
if temp != i:
continue
ftable = None
if i == 1:
ftable = open('{}FTABLE.DAT'.format(ffxi_path), 'rb')
else:
ftable = open('{}ROM{}\\FTABLE{}.DAT'.format(ffxi_path, i, i), 'rb')
ftable.seek(dat_id * 2)
path = struct.unpack('H', ftable.read(2))[0]
ftable.close()
if i == 1:
return '{}ROM\\{}\\{}.DAT'.format(ffxi_path, path >> 7, path & 0x7f)
else:
return '{}ROM{}\\{}\\{}.DAT'.format(ffxi_path, i, path >> 7, path & 0x7f)
return None
def decipher_dialog(dat_file):
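    # Descriptive sketch of the layout, as implied by the code below: the file
    # starts with two little-endian uint32 header fields (an obfuscated size
    # and the offset of the first message entry), and every remaining byte is
    # XOR'ed with 0x80. After de-obfuscation, the leading bytes (up to
    # first_entry) form a uint32 offset table that is rebased onto the
    # message blob returned alongside it.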
dat = open(dat_file, 'rb')
dat_size, first_entry = struct.unpack('II', dat.read(8))
dat_size -= 0x10000000
first_entry ^= 0x80808080
dat.seek(4)
data = bytearray(dat.read())
dat.close()
for i in range(len(data)):
data[i] ^= 0x80
offsets = array.array('I', data[:first_entry])
offsets.append(dat_size)
for i in range(len(offsets)):
offsets[i] -= first_entry
return offsets, bytes(data[first_entry:])
def search_dialog(zones, search):
messages = {}
for zone_id, dat_id in zones.items():
offsets, data = decipher_dialog(find_dat(dat_id))
for i in range(len(offsets) - 1):
message = data[offsets[i]:offsets[i+1]]
for name, string in search.items():
if message == string:
if messages.get(zone_id) is None:
messages[zone_id] = {name: i}
else:
messages[zone_id][name] = i
return messages
def write_lua(messages):
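    # Emits a Lua table of the form:
    #   messages = {}
    #   messages[<zone_id>] = {<name>=<message_index>, ...}
    # with zone ids and names written in sorted order.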
o = open('messages.lua', 'w')
print('messages = {}', file=o)
zone_ids = list(messages.keys())
zone_ids.sort()
for zone_id in zone_ids:
line = []
names = list(messages[zone_id].keys())
names.sort()
for name in names:
line.append('{}={}'.format(name, messages[zone_id][name]))
line = ', '.join(line)
print("messages[{}] = {{{}}}".format(zone_id, line), file=o)
o.close()
write_lua(search_dialog(zones, search))
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from wechat_sdk import WechatBasic
from wechat_sdk.messages import (
TextMessage, VoiceMessage, ImageMessage, VideoMessage, LinkMessage, LocationMessage, EventMessage
)
#wechat = WechatBasic(token=settings.WECHAT_TOKEN, appid=settings.WECHAT_ACCOUNT)
# Create your views here.
@csrf_exempt
def smart_entry(request):
signature = request.REQUEST.get('signature', None)
timestamp = request.REQUEST.get('timestamp', None)
nonce = request.REQUEST.get('nonce', None)
# if it's account authenticate request
echostr = request.REQUEST.get('echostr', None)
if echostr:
return HttpResponse(echostr)
wechat = WechatBasic(token=settings.WECHAT_TOKEN, appid=settings.WECHAT_ACCOUNT)
# 对签名进行校验
if wechat.check_signature(signature=signature, timestamp=timestamp, nonce=nonce):
# 对 XML 数据进行解析 (必要, 否则不可执行 response_text, response_image 等操作)
body_text = request.body
wechat.parse_data(body_text)
# 获得解析结果, message 为 WechatMessage 对象 (wechat_sdk.messages中定义)
message = wechat.get_message()
response = None
if isinstance(message, TextMessage):
response = wechat.response_text(content=u'文字信息')
elif isinstance(message, VoiceMessage):
response = wechat.response_text(content=u'语音信息')
elif isinstance(message, ImageMessage):
response = wechat.response_text(content=u'图片信息')
elif isinstance(message, VideoMessage):
response = wechat.response_text(content=u'视频信息')
elif isinstance(message, LinkMessage):
response = wechat.response_text(content=u'链接信息')
elif isinstance(message, LocationMessage):
response = wechat.response_text(content=u'地理位置信息')
elif isinstance(message, EventMessage): # 事件信息
if message.type == 'subscribe': # 关注事件(包括普通关注事件和扫描二维码造成的关注事件)
if message.key and message.ticket: # 如果 key 和 ticket 均不为空,则是扫描二维码造成的关注事件
response = wechat.response_text(content=u'用户尚未关注时的二维码扫描关注事件')
else:
response = wechat.response_text(content=u'普通关注事件')
elif message.type == 'unsubscribe':
response = wechat.response_text(content=u'取消关注事件')
elif message.type == 'scan':
response = wechat.response_text(content=u'用户已关注时的二维码扫描事件')
elif message.type == 'location':
response = wechat.response_text(content=u'上报地理位置事件')
elif message.type == 'click':
response = wechat.response_text(content=u'自定义菜单点击事件')
elif message.type == 'view':
response = wechat.response_text(content=u'自定义菜单跳转链接事件')
elif message.type == 'templatesendjobfinish':
response = wechat.response_text(content=u'模板消息事件')
return HttpResponse(response)
return HttpResponse("Not implemented yet")
|
#!/usr/bin/env python
"""
Text to speech.
https://pythonadventures.wordpress.com/2011/09/02/linux-python-text-to-speech/
say: say arbitrary text (low quality)
say_with_google: say just one word (high quality)
# from jabbapylib.say.say import say_with_google
"""
import os
from jabbapylib.web import web
from jabbapylib.filesystem import fs
from jabbapylib.multimedia.play import play
from jabbapylib import config as cfg
template = 'https://ssl.gstatic.com/dictionary/static/sounds/de/0/{word}.mp3'
def say(text):
"""Say a given text.
It calls espeak."""
cmd = '{espeak} "{text}" 2>/dev/null'.format(espeak=cfg.ESPEAK, text=text)
os.system(cmd)
def say_with_google(word, autoremove=True, background=False, debug=False):
"""
Say a word with Google.
https://ubuntuincident.wordpress.com/2012/03/27/audio-pronunciation-of-words-from-google/
The return value is a tuple: (found, mp3_file), where
found is True if the word was retrieved successfully (False otherwise), and
mp3_file is the path of the locally saved mp3 (or None if it was not saved).
Set autoremove to False if you want to work with the mp3 later, when this
function returned.
The function stores the mp3 files in /tmp.
"""
found = False # Was the mp3 successfully found?
mp3_file = None # Is the locally saved mp3 file kept?
url = template.format(word=word)
content = web.get_page(url, user_agent=True)
if content:
found = True
fname = '/tmp/{word}.mp3'.format(word=word)
fs.store_content_in_file(content, fname, overwrite=True)
mp3_file = fname
if not debug:
play(fname, background=background)
if autoremove:
os.unlink(fname)
mp3_file = None
else:
found = False
mp3_file = None
return (found, mp3_file)
#############################################################################
if __name__ == "__main__":
text = "linux text to speech"
say(text)
print say_with_google('python')
|
#!/usr/bin/env python
#
# faq.py
#
# Routines to assemble a FAQ list for the Wireshark web site.
# Questions and answer content can be found below. Section and
# question numbers will be automatically generated.
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import string
class faq_section:
def __init__(self, name, secnum):
self.name = name
self.secnum = secnum
self.qa = []
self.subsecs = []
def add_qa(self, question, answer, tag):
q_num = len(self.qa) + 1
q_id = "%s.%d" % (self.get_num_string(), q_num)
self.qa.append( (q_id, question, answer, tag) )
def get_all_qa(self):
return self.qa
def add_subsec(self, subsec):
self.subsecs.append(subsec)
def get_all_subsecs(self):
return self.subsecs
def get_num_string(self):
return "%d" % (self.secnum)
def get_name(self):
return self.name
def get_num_name(self):
return "%s. %s" % (self.get_num_string(), self.name)
def get_header_level(self):
return 3
def print_index(self):
print(("<a href=#sec%s><h%d>%s:</h%d></a>\n" % (self.get_num_string(), self.get_header_level(), self.get_num_name(), self.get_header_level())))
for qa in self.qa:
id = qa[0]
question = qa[1]
print('<p class="faq_q">')
print(('<a class="faq_qnum" href=#q%s>%s %s</a>\n' % (id, id, question)))
print('</p>')
for subsec in self.subsecs:
subsec.print_index()
def print_contents(self):
# Table header
print(("""
<h%d id="sec%s">%s</h%d>
""" % (self.get_header_level(), self.get_num_string(), self.get_num_name(), self.get_header_level())))
# Questions and Answers
for qa in self.qa:
id = qa[0]
question = qa[1]
answer = qa[2]
tag = qa[3]
print('<p class="faq_q">')
if tag is not None:
print(('<span id=%s></span>' % (tag)))
print(('<a href=#q%s class="faq_qnum" id=q%s>Q %s: %s</a>' % (id, id, id, question)))
print('</p>')
print('<p class="faq_a">')
print('<span class="faq_anum">A:</span>\n')
print(answer)
print('</p>')
# Subsections
for subsec in self.subsecs:
subsec.print_contents()
# Table footer
print("")
class faq_subsection(faq_section):
def __init__(self, name, secnum, subsecnum):
self.name = name
self.secnum = secnum
self.subsecnum = subsecnum
self.qa = []
self.subsecs = []
def get_num_string(self):
return "%d.%d" % (self.secnum, self.subsecnum)
def get_header_level(self):
return 2
class faq_subsubsection(faq_section):
def __init__(self, name, secnum, subsecnum, subsubsecnum):
self.name = name
self.secnum = secnum
self.subsecnum = subsecnum
self.subsubsecnum = subsubsecnum
self.qa = []
self.subsecs = []
def get_num_string(self):
return "%d.%d.%d" % (self.secnum, self.subsecnum, self.subsubsecnum)
def get_header_level(self):
return 2
sec_num = 0
subsec_num = 0
subsubsec_num = 0
sections = []
current_section = None
parent_section = None
grandparent_section = None
current_question = None
current_tag = None
# Make a URL of itself
def selflink(text):
return "<a href=\"%s\">%s</a>" % (text, text)
# Add a section
def section(name):
global sec_num
global subsec_num
global subsubsec_num
global current_section
global grandparent_section
assert not current_question
sec_num = sec_num + 1
subsec_num = 0
subsubsec_num = 0
sec = faq_section(name, sec_num)
sections.append(sec)
current_section = sec
grandparent_section = sec
# Add a subsection
def subsection(name):
global subsec_num
global subsubsec_num
global current_section
global parent_section
global grandparent_section
assert not current_question
subsec_num = subsec_num + 1
subsubsec_num = 0
sec = faq_subsection(name, sec_num, subsec_num)
grandparent_section.add_subsec(sec)
current_section = sec
parent_section = sec
# Add a subsubsection
def subsubsection(name):
global subsubsec_num
global current_section
global parent_section
assert not current_question
subsubsec_num = subsubsec_num + 1
sec = faq_subsubsection(name, sec_num, subsec_num, subsubsec_num)
parent_section.add_subsec(sec)
current_section = sec
# Add a question
def question(text, tag=None):
global current_question
global current_tag
assert current_section
assert not current_question
assert not current_tag
current_question = text
current_tag = tag
# Add an answer
def answer(text):
global current_question
global current_tag
assert current_section
assert current_question
current_section.add_qa(current_question, text, current_tag)
current_question = None
current_tag = None
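# The FAQ content below is written with a small declarative pattern: each
# section()/subsection()/subsubsection() call opens a new numbered section,
# and every question()/answer() pair that follows is attached to the most
# recently opened section. create_output() then walks the accumulated
# sections to emit the index and the question/answer bodies as HTML.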
# Create the index
def create_index():
print("""
<h1 id="index">Index</h1>
""")
for sec in sections:
sec.print_index()
print("""
""")
# Print result
def create_output(header='', footer=''):
print(header)
create_index()
for sec in sections:
sec.print_contents()
print(footer)
def main():
header = '''\
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Wireshark Frequently Asked Questions</title>
</head>
<body>
'''
footer = '''\
</body>
</html>
'''
if len(sys.argv) > 1 and sys.argv[1] == '-b': # Only print the document body
header = ''
footer = ''
create_output(header, footer)
#################################################################
section("General Questions")
#################################################################
question("What is Wireshark?")
answer("""
Wireshark® is a network protocol analyzer. It lets you capture and
interactively browse the traffic running on a computer network. It has
a rich and powerful feature set and is world's most popular tool of its
kind. It runs on most computing platforms including Windows, OS X,
Linux, and UNIX. Network professionals, security experts, developers,
and educators around the world use it regularly. It is freely available
as open source, and is released under the GNU General Public License
version 2.
<br>
It is developed and maintained by a global team of protocol experts, and
it is an example of a
<a href="https://en.wikipedia.org/wiki/Disruptive_technology">disruptive
technology</a>.
<br>
Wireshark used to be known as Ethereal®. See the next question
for details about the name change. If you're still using Ethereal, it
is strongly recommended that you upgrade to Wireshark as Ethereal is
unsupported and has known security vulnerabilities.
<br>
For more information, please see the
<a href="https://www.wireshark.org/about.html">About Wireshark</a>
page.
""")
question("What's up with the name change? Is Wireshark a fork?")
answer("""
In May of 2006, Gerald Combs (the original author of Ethereal)
went to work for CACE Technologies (best known for WinPcap).
Unfortunately, he had to leave the Ethereal trademarks behind.
<br>
This left the project in an awkward position. The only reasonable way
to ensure the continued success of the project was to change the name.
This is how Wireshark was born.
<br>
Wireshark is almost (but not quite) a fork. Normally a "fork" of an open source
project results in two names, web sites, development teams, support
infrastructures, etc. This is the case with Wireshark except for one notable
exception -- every member of the core development team is now working on
Wireshark. There has been no active development on Ethereal since the name
change. Several parts of the Ethereal web site (such as the mailing lists,
source code repository, and build farm) have gone offline.
<br>
More information on the name change can be found here:
</p>
<ul class="item_list">
<li><a href="http://www.prweb.com/releases/2006/6/prweb396098.htm">Original press release</a>
<li><a href="http://archive09.linux.com/articles/54968">NewsForge article</a>
<li>Many other articles in <a href="https://www.wireshark.org/bibliography.html">our bibliography</a>
</ul>
<p>
""")
question("Where can I get help?")
answer("""
Community support is available on the
<a href="https://ask.wireshark.org/">Q&A site</a> and on the
wireshark-users mailing list. Subscription information and archives for
all of Wireshark's mailing lists can be found at %s. An IRC channel
dedicated to Wireshark can be found at %s.
<br>
Self-paced and instructor-led training is available at <a
href="http://www.wiresharktraining.com">Wireshark University</a>.
Wireshark University also offers certification via the Wireshark
Certified Network Analyst program.
""" % (selflink("https://www.wireshark.org/mailman/listinfo"),
selflink("irc://irc.freenode.net/wireshark")
))
question("What kind of shark is Wireshark?")
answer("""
<i>carcharodon photoshopia</i>.
""")
question("How is Wireshark pronounced, spelled and capitalized?")
answer("""
Wireshark is pronounced as the word <i>wire</i> followed immediately by
the word <i>shark</i>. Exact pronunciation and emphasis may vary
depending on your locale (e.g. Arkansas).
<br>
It's spelled with a capital <i>W</i>, followed by a lower-case
<i>ireshark</i>. It is not a CamelCase word, i.e., <i>WireShark</i>
is incorrect.
""")
question("How much does Wireshark cost?", "but_thats_not_all")
answer("""
Wireshark is "free software"; you can download it without paying any
license fee. The version of Wireshark you download isn't a "demo"
version, with limitations not present in a "full" version; it
<em>is</em> the full version.
<br>
The license under which Wireshark is issued is <a
href="https://www.gnu.org/licenses/gpl-2.0.html">the GNU General Public
License version 2</a>. See <a
href="https://www.gnu.org/licenses/old-licenses/gpl-2.0-faq.html">the GNU
GPL FAQ</a> for some more information.
""")
question("But I just paid someone on eBay for a copy of Wireshark! Did I get ripped off?")
answer("""
That depends. Did they provide any sort of value-added product or service, such
as installation support, installation media, training, trace file analysis, or
funky-colored shark-themed socks? Probably not.
<br>
Wireshark is <a href="https://www.wireshark.org/download.html">available for
anyone to download, absolutely free, at any time</a>. Paying for a copy implies
that you should get something for your money.
""")
question("Can I use Wireshark commercially?")
answer("""
Yes, if, for example, you mean "I work for a commercial organization;
can I use Wireshark to capture and analyze network traffic in our
company's networks or in our customer's networks?"
<br>
If you mean "Can I use Wireshark as part of my commercial product?", see
<a href="#derived_work_gpl">the next entry in the FAQ</a>.
""")
question("Can I use Wireshark as part of my commercial product?",
"derived_work_gpl")
answer("""
As noted, Wireshark is licensed under <a
href="https://www.gnu.org/licenses/gpl-2.0.html">the GNU General Public
License, version 2</a>. The GPL imposes conditions on your use of GPL'ed
code in your own products; you cannot, for example, make a "derived
work" from Wireshark, by making modifications to it, and then sell the
resulting derived work and not allow recipients to give away the
resulting work. You must also make the changes you've made to the
Wireshark source available to all recipients of your modified version;
those changes must also be licensed under the terms of the GPL. See the
<a href="https://www.gnu.org/licenses/old-licenses/gpl-2.0-faq.html">GPL
FAQ</a> for more details; in particular, note the answer to <a
href="https://www.gnu.org/licenses/old-licenses/gpl-2.0-faq.html#GPLCommercially">the
question about modifying a GPLed program and selling it
commercially</a>, and <a
href="https://www.gnu.org/licenses/old-licenses/gpl-2.0-faq.html#LinkingWithGPL">the
question about linking GPLed code with other code to make a proprietary
program</a>.
<br>
You can combine a GPLed program such as Wireshark and a commercial
program as long as they communicate "at arm's length", as per <a
href="https://www.gnu.org/licenses/old-licenses/gpl-2.0-faq.html#GPLInProprietarySystem">this
item in the GPL FAQ</a>.
<br>
We recommend keeping Wireshark and your product completely separate,
communicating over sockets or pipes. If you're loading any part of
Wireshark as a DLL, you're probably doing it wrong.
""")
question("What protocols are currently supported?")
answer("""
There are currently hundreds of supported
protocols and media. Details can be found in the
<a
href="https://www.wireshark.org/docs/man-pages/wireshark.html">wireshark(1)</a>
man page.
""")
question("Are there any plans to support {your favorite protocol}?")
answer("""
Support for particular protocols is added to Wireshark as a result of
people contributing that support; no formal plans for adding support for
particular protocols in particular future releases exist.
""")
question("""Can Wireshark read capture files from {your favorite network
analyzer}?""")
answer("""
Support for particular capture file formats is added to Wireshark as a result
of people contributing that support; no formal plans for adding support for
particular capture file formats in particular future releases exist.
<br>
If a network analyzer writes out files in a format already supported by
Wireshark (e.g., in libpcap format), Wireshark may already be able to read
them, unless the analyzer has added its own proprietary extensions to
that format.
<br>
If a network analyzer writes out files in its own format, or has added
proprietary extensions to another format, in order to make Wireshark read
captures from that network analyzer, we would either have to have a
specification for the file format, or the extensions, sufficient to give
us enough information to read the parts of the file relevant to
Wireshark, or would need at least one capture file in that format
<strong>AND</strong> a detailed textual analysis of the packets in that
capture file (showing packet time stamps, packet lengths, and the
top-level packet header) in order to reverse-engineer the file
format.
<br>
Note that there is no guarantee that we will be able to reverse-engineer
a capture file format.
""")
question("What devices can Wireshark use to capture packets?")
answer("""
Wireshark can read live data from Ethernet, Token-Ring, FDDI, serial (PPP
and SLIP) (if the OS on which it's running allows Wireshark to do so),
802.11 wireless LAN (if the OS on which it's running allows Wireshark to
do so), ATM connections (if the OS on which it's running allows Wireshark
to do so), and the "any" device supported on Linux by recent versions of
libpcap.
<br>
See <a href="https://wiki.wireshark.org/CaptureSetup/NetworkMedia">the list of
supported capture media on various OSes</a> for details (several items
in there say "Unknown", which doesn't mean "Wireshark can't capture on
them", it means "we don't know whether it can capture on them"; we
expect that it will be able to capture on many of them, but we haven't
tried it ourselves - if you try one of those types and it works, please
update the wiki page accordingly).
<br>
It can also read a variety of capture file formats, including:
</p>
<ul>
<li> AG Group/WildPackets/Savvius EtherPeek/TokenPeek/AiroPeek/EtherHelp/Packet Grabber captures
<li> AIX's iptrace captures
<li> Accellent's 5Views LAN agent output
<li> Cinco Networks NetXRay captures
<li> Cisco Secure Intrusion Detection System IPLog output
<li> CoSine L2 debug output
<li> DBS Etherwatch VMS text output
<li> Endace Measurement Systems' ERF format captures
<li> EyeSDN USB S0 traces
<li> HP-UX nettl captures
<li> ISDN4BSD project i4btrace captures
<li> Linux Bluez Bluetooth stack hcidump -w traces
<li> Lucent/Ascend router debug output
<li> Microsoft Network Monitor captures
<li> Network Associates Windows-based Sniffer captures
<li> Network General/Network Associates DOS-based Sniffer (compressed or uncompressed) captures
<li> Network Instruments Observer version 9 captures
<li> Novell LANalyzer captures
<li> RADCOM's WAN/LAN analyzer captures
<li> Shomiti/Finisar Surveyor captures
<li> Toshiba's ISDN routers dump output
<li> VMS TCPIPtrace/TCPtrace/UCX$TRACE output
<li> Visual Networks' Visual UpTime traffic capture
<li> libpcap, tcpdump and various other tools using tcpdump's capture format
<li> snoop and atmsnoop output
</ul>
<p>
so that it can read traces from various network types, as captured by
other applications or equipment, even if it cannot itself capture on
those network types.
""")
question("""
Does Wireshark work on Windows Vista or Windows Server 2008?
""")
answer("""
Yes, but if you want to capture packets as a normal user, you must make sure
npf.sys is loaded. Wireshark's installer enables this by default. This is not a
concern if you run Wireshark as Administrator, but this is discouraged. See the
<a
href="https://wiki.wireshark.org/CaptureSetup/CapturePrivileges#windows">CapturePrivileges</a>
page on the wiki for more details.
""")
#################################################################
section("Installing Wireshark")
#################################################################
question("""I installed the Wireshark RPM (or other package); why did
it install TShark but not Wireshark?""")
answer("""
Many distributions have separate Wireshark packages, one for non-GUI
components such as TShark, editcap, dumpcap, etc. and one for the GUI.
If this is the case on your system, there's probably a separate package
named <code>wireshark-gnome</code> or <code>wireshark-gtk+</code>. Find it and
install it.
""")
#################################################################
section("Building Wireshark")
#################################################################
question("""I have libpcap installed; why did the configure script not
find pcap.h or bpf.h?""")
answer("""
Are you sure pcap.h and bpf.h are installed? The official distribution
of libpcap only installs the libpcap.a library file when "make install"
is run. To install pcap.h and bpf.h, you must run "make install-incl".
If you're running Debian or Redhat, make sure you have the "libpcap-dev"
or "libpcap-devel" packages installed.
<br>
It's also possible that pcap.h and bpf.h have been installed in a strange
location. If this is the case, you may have to tweak aclocal.m4.
""")
question("""
Why do I get the error
<em>dftest_DEPENDENCIES was already defined in condition TRUE,
which implies condition HAVE_PLUGINS_TRUE</em>
when I try to build Wireshark from SVN or a SVN snapshot?
""")
answer("""
You probably have automake 1.5 installed on your machine (the command
<kbd>automake --version</kbd> will report the version of automake on
your machine). There is a bug in that version of automake that causes
this problem; upgrade to a later version of automake (1.6 or later).
""")
question("""
Why does the linker fail with a number of "Output line too long." messages
followed by linker errors when I try to build Wireshark?
""")
answer("""
The version of the <code>sed</code> command on your system is incapable of
handling very long lines. On Solaris, for example,
<code>/usr/bin/sed</code> has a line length limit too low to allow
<code>libtool</code> to work; <code>/usr/xpg4/bin/sed</code> can handle it, as
can GNU <code>sed</code> if you have it installed.
<br>
On Solaris, changing your command search path to search
<code>/usr/xpg4/bin</code> before <code>/usr/bin</code> should make the problem
go away; on any platform on which you have this problem, installing GNU
<code>sed</code> and changing your command path to search the directory in
which it is installed before searching the directory with the version of
<code>sed</code> that came with the OS should make the problem go away.
""")
question("""
When I try to build Wireshark on Solaris, why does the link fail
complaining that <code>plugin_list</code> is undefined?
""")
answer("""
This appears to be due to a problem with some versions of the GTK+ and
GLib packages from www.sunfreeware.org; un-install those packages, and
try getting the 1.2.10 versions from that site, or the versions from <a
href="http://www.thewrittenword.com">The Written Word</a>, or the
versions from Sun's GNOME distribution, or the versions from the
supplemental software CD that comes with the Solaris media kit, or build
them from source from <a href="http://www.gtk.org/">the GTK Web
site</a>. Then re-run the configuration script, and try rebuilding
Wireshark. (If you get the 1.2.10 versions from www.sunfreeware.org, and
the problem persists, un-install them and try installing one of the
other versions mentioned.)
""")
question("""
When I try to build Wireshark on Windows, why does the build fail because
of conflicts between <code>winsock.h</code> and <code>winsock2.h</code>?
""")
answer("""
As of Wireshark 0.9.5, you must install WinPcap 2.3 or later, and the
corresponding version of the developer's pack, in order to be able to
compile Wireshark; it will not compile with older versions of the
developer's pack. The symptoms of this failure are conflicts between
definitions in <code>winsock.h</code> and in <code>winsock2.h</code>; Wireshark
uses <code>winsock2.h</code>, but pre-2.3 versions of the WinPcap
developer's packet use <code>winsock.h</code>. (2.3 uses
<code>winsock2.h</code>, so if Wireshark were to use <code>winsock.h</code>, it
would not be able to build with current versions of the WinPcap
developer's pack.)
<br>
Note that the installed version of the developer's pack should be the
same version as the version of WinPcap you have installed.
""")
#################################################################
section("Starting Wireshark")
#################################################################
question("""Why does Wireshark crash with a Bus Error when I try to run
it on Solaris 8?""")
answer("""
Some versions of the GTK+ library from www.sunfreeware.org appear to be
buggy, causing Wireshark to drop core with a Bus Error. Un-install those
packages, and try getting the 1.2.10 version from that site, or the
version from <a href="http://www.thewrittenword.com">The Written
Word</a>, or the version from Sun's GNOME distribution, or the version
from the supplemental software CD that comes with the Solaris media kit,
or build it from source from <a href="http://www.gtk.org/">the GTK Web
site</a>. Update the GLib library to the 1.2.10 version, from the same
source, as well. (If you get the 1.2.10 versions from
www.sunfreeware.org, and the problem persists, un-install them and try
installing one of the other versions mentioned.)
<br>
Similar problems may exist with older versions of GTK+ for earlier
versions of Solaris.
""")
question("""When I try to run Wireshark, why does it complain about
<code>sprint_realloc_objid</code> being undefined?""")
answer("""
Wireshark can only be linked with version 4.2.2 or later of UCD SNMP.
Your version of Wireshark was dynamically linked with such a version of
UCD SNMP; however, you have an older version of UCD SNMP installed,
which means that when Wireshark is run, it tries to link to the older
version, and fails. You will have to replace that version of UCD SNMP
with version 4.2.2 or a later version.
""")
question("""
I've installed Wireshark from Fink on OS X; why is it very slow to
start up?
""")
answer("""
When an application is installed on OS X, prior to 10.4, it is usually
"prebound" to speed up launching the application. (That's what the
"Optimizing" phase of installation is.)
<br>
Fink normally performs prebinding automatically when you install a
package. However, in some rare cases, for whatever reason the prebinding
caches get corrupt, and then not only does prebinding fail, but startup
actually becomes much slower, because the system tries in vain to
perform prebinding "on the fly" as you launch the application. This
fails, sometimes causing huge delays.
<br>
To fix the prebinding caches, run the command
</p>
<pre>
sudo /sw/var/lib/fink/prebound/update-package-prebinding.pl -f
</pre>
<p>
""")
#################################################################
section("Crashes and other fatal errors")
#################################################################
question("""
I have an XXX network card on my machine; if I try to capture on it, why
does my machine crash or reset itself?
""")
answer("""
This is almost certainly a problem with one or more of:
</p>
<ul>
<li>the operating system you're using;
<li>the device driver for the interface you're using;
<li>the libpcap/WinPcap library and, if this is Windows, the WinPcap
device driver;
</ul>
<p>
so:
</p>
<ul>
<li>if you are using Windows, see <a
href="https://www.winpcap.org/contact.htm">the WinPcap support
page</a> - check the "Submitting bugs" section;
<li>if you are using some Linux distribution, some version of BSD, or
some other UNIX-flavored OS, you should report the problem to the
company or organization that produces the OS (in the case of a Linux
distribution, report the problem to whoever produces the distribution).
</ul>
<p>
""")
question("""
Why does my machine crash or reset itself when I select "Start" from the
"Capture" menu or select "Preferences" from the "Edit" menu?
""")
answer("""
Both of those operations cause Wireshark to try to build a list of the
interfaces that it can open; it does so by getting a list of interfaces
and trying to open them. There is probably an OS, driver, or, for
Windows, WinPcap bug that causes the system to crash when this happens;
see the previous question.
""")
#################################################################
section("Capturing packets")
#################################################################
question("""When I use Wireshark to capture packets, why do I see only
packets to and from my machine, or not see all the traffic I'm expecting
to see from or to the machine I'm trying to monitor?""", "promiscsniff")
answer("""
This might be because the interface on which you're capturing is plugged
into an Ethernet or Token Ring switch; on a switched network, unicast
traffic between two ports will not necessarily appear on other ports -
only broadcast and multicast traffic will be sent to all ports.
<br>
Note that even if your machine is plugged into a hub, the "hub" may be
a switched hub, in which case you're still on a switched network.
<br>
Note also that on the Linksys Web site, they say that their
auto-sensing hubs "broadcast the 10Mb packets to the port that operate
at 10Mb only and broadcast the 100Mb packets to the ports that operate
at 100Mb only", which would indicate that if you sniff on a 10Mb port,
you will not see traffic coming sent to a 100Mb port, and <i>vice
versa</i>. This problem has also been reported for Netgear dual-speed
hubs, and may exist for other "auto-sensing" or "dual-speed" hubs.
<br>
Some switches have the ability to replicate all traffic on all ports to
a single port so that you can plug your analyzer into that single port to
sniff all traffic. You would have to check the documentation for the
switch to see if this is possible and, if so, to see how to do this.
See <a href="https://wiki.wireshark.org/SwitchReference">the switch
reference page</a> on <a href="https://wiki.wireshark.org/">the Wireshark
Wiki</a> for information on some switches. (Note that it's a Wiki, so
you can update or fix that information, or add additional information on
those switches or information on new switches, yourself.)
<br>
Note also that many firewall/NAT boxes have a switch built into them;
this includes many of the "cable/DSL router" boxes. If you have a box
of that sort, that has a switch with some number of Ethernet ports into
which you plug machines on your network, and another Ethernet port used
to connect to a cable or DSL modem, you can, at least, sniff traffic
between the machines on your network and the Internet by plugging
the Ethernet port on the router going to the modem, the Ethernet port on
the modem, and the machine on which you're running Wireshark into a hub
(make sure it's not a switching hub, and that, if it's a dual-speed hub,
all three of those ports are running at the same speed).
<br>
If your machine is <em>not</em> plugged into a switched network or a
dual-speed hub, or it is plugged into a switched network but the port is
set up to have all traffic replicated to it, the problem might be that
the network interface on which you're capturing doesn't support
"promiscuous" mode, or because your OS can't put the interface into
promiscuous mode. Normally, network interfaces supply to the host only:
</p>
<ul>
<li>packets sent to one of that host's link-layer addresses;
<li>broadcast packets;
<li>multicast packets sent to a multicast address that the host has
configured the interface to accept.
</ul>
<p>
Most network interfaces can also be put in "promiscuous" mode, in which
they supply to the host all network packets they see. Wireshark will try
to put the interface on which it's capturing into promiscuous mode
unless the "Capture packets in promiscuous mode" option is turned off in
the "Capture Options" dialog box, and TShark will try to put the
interface on which it's capturing into promiscuous mode unless the
<code>-p</code> option was specified. However, some network interfaces
don't support promiscuous mode, and some OSes might not allow interfaces
to be put into promiscuous mode.
<br>
If the interface is not running in promiscuous mode, it won't see any
traffic that isn't intended to be seen by your machine. It
<strong>will</strong> see broadcast packets, and multicast packets sent
to a multicast MAC address the interface is set up to receive.
<br>
You should ask the vendor of your network interface whether it supports
promiscuous mode. If it does, you should ask whoever supplied the
driver for the interface (the vendor, or the supplier of the OS you're
running on your machine) whether it supports promiscuous mode with that
network interface.
<br>
In the case of token ring interfaces, the drivers for some of them, on
Windows, may require you to enable promiscuous mode in order to capture
in promiscuous mode. See <a
href="https://wiki.wireshark.org/CaptureSetup/TokenRing">the Wireshark
Wiki item on Token Ring capturing</a> for details.
<br>
In the case of wireless LAN interfaces, it appears that, when those
interfaces are promiscuously sniffing, they're running in a
significantly different mode from the mode that they run in when they're
just acting as network interfaces (to the extent that it would be a
significant effort for those drivers to support promiscuously
sniffing <em>and</em> acting as regular network interfaces at the same
time), so it may be that Windows drivers for those interfaces don't
support promiscuous mode.
""")
question("""When I capture with Wireshark, why can't I see any TCP
packets other than packets to and from my machine, even though another
analyzer on the network sees those packets?""")
answer("""
You're probably not seeing <em>any</em> packets other than unicast
packets to or from your machine, and broadcast and multicast packets; a
switch will normally send to a port only unicast traffic sent to the MAC
address for the interface on that port, and broadcast and multicast
traffic - it won't send to that port unicast traffic sent to a MAC
address for some other interface - and a network interface not in
promiscuous mode will receive only unicast traffic sent to the MAC
address for that interface, broadcast traffic, and multicast traffic
sent to a multicast MAC address the interface is set up to receive.
<br>
TCP doesn't use broadcast or multicast, so you will only see your own
TCP traffic, but UDP services may use broadcast or multicast so you'll
see some UDP traffic - however, this is not a problem with TCP traffic,
it's a problem with unicast traffic, as you also won't see all UDP
traffic between other machines.
<br>
I.e., this is probably <a href="#promiscsniff">the same question
as this earlier one</a>; see the response to that question.
""")
question("""Why am I only seeing ARP packets when I try to capture
traffic?""")
answer("""
You're probably on a switched network, and running Wireshark on a machine
that's not sending traffic to the switch and not being sent any traffic
from other machines on the switch. ARP packets are often broadcast
packets, which are sent to all switch ports.
<br>
I.e., this is probably <a href="#promiscsniff">the same question
as this earlier one</a>; see the response to that question.
""")
question("""
Why am I not seeing any traffic when I try to capture traffic?""")
answer("""
Is the machine running Wireshark sending out any traffic on the network
interface on which you're capturing, or receiving any traffic on that
network, or is there any broadcast traffic on the network or multicast
traffic to a multicast group to which the machine running Wireshark
belongs?
<br>
If not, this may just be a problem with promiscuous sniffing, either due
to running on a switched network or a dual-speed hub, or due to problems
with the interface not supporting promiscuous mode; see the response to
<a href="#promiscsniff">this earlier question</a>.
<br>
Otherwise, on Windows, see the response to <a href="#capprobwin">this
question</a> and, on a UNIX-flavored OS, see the response to <a
href="#capprobunix">this question</a>.
""")
question("""
Can Wireshark capture on (my T1/E1 line, SS7 links, etc.)?
""")
answer("""
Wireshark can only capture on devices supported by libpcap/WinPcap. On
most OSes, only devices that can act as network interfaces of the type
that support IP are supported as capture devices for libpcap/WinPcap,
although the device doesn't necessarily have to be running as an IP
interface in order to support traffic capture.
<br>
On Linux and FreeBSD, libpcap 0.8 and later support the API for <a
href="http://www.endace.com/products.htm">Endace Measurement Systems'
DAG cards</a>, so that a system with one of those cards, and its driver
and libraries, installed can capture traffic with those cards with
libpcap-based applications. You would either have to have a version of
Wireshark built with that version of libpcap, or a dynamically-linked
version of Wireshark and a shared libpcap library with DAG support, in
order to do so with Wireshark. You should ask Endace whether that could
be used to capture traffic on, for example, your T1/E1 link.
<br>
See <a href="https://wiki.wireshark.org/CaptureSetup/SS7">the SS7 capture
setup page</a> on <a href="https://wiki.wireshark.org/">the Wireshark
Wiki</a> for current information on capturing SS7 traffic on TDM
links.
""")
question("""How do I put an interface into promiscuous mode?""")
answer("""
By not disabling promiscuous mode when running Wireshark or TShark.
<br>
Note, however, that:
</p>
<ul>
<li>the form of promiscuous mode that libpcap (the library that
programs such as tcpdump, Wireshark, etc. use to do packet capture)
turns on will <strong>not</strong> necessarily be shown if you run
<code>ifconfig</code> on the interface on a UNIX system;
<li>some network interfaces might not support promiscuous mode, and some
drivers might not allow promiscuous mode to be turned on - see <a
href="#promiscsniff">this earlier question</a> for more information on
that;
<li>the fact that you're not seeing any traffic, or are only seeing
broadcast traffic, or aren't seeing any non-broadcast traffic other than
traffic to or from the machine running Wireshark, does not mean that
promiscuous mode isn't on - see <a href="#promiscsniff">this earlier
question</a> for more information on that.
</ul>
<p>
I.e., this is probably <a href="#promiscsniff">the same question
as this earlier one</a>; see the response to that question.
""")
question("""
I can set a display filter just fine; why don't capture filters work?
""")
answer("""
Capture filters currently use a different syntax than display filters. Here's
the corresponding section from the
<a
href="https://www.wireshark.org/docs/man-pages/wireshark.html">wireshark(1)</a>
man page:
<br>
"Display filters in Wireshark are very powerful; more fields are filterable
in Wireshark than in other protocol analyzers, and the syntax you can
use to create your filters is richer. As Wireshark progresses, expect
more and more protocol fields to be allowed in display filters.
<br>
Packet capturing is performed with the pcap library. The capture filter
syntax follows the rules of the pcap library. This syntax is different
from the display filter syntax."
<br>
The capture filter syntax used by libpcap can be found in the
<a href="http://www.tcpdump.org/tcpdump_man.html">tcpdump(8)</a>
man page.
""")
question("""I'm entering valid capture filters; why do I still get
"parse error" errors?""")
answer("""
There is a bug in some versions of libpcap/WinPcap that cause it to
report parse errors even for valid expressions if a previous filter
expression was invalid and got a parse error.
<br>
Try exiting and restarting Wireshark; if you are using a version of
libpcap/WinPcap with this bug, this will "erase" its memory of the
previous parse error. If the capture filter that got the "parse error"
now works, the earlier error with that filter was probably due to this
bug.
<br>
The bug was fixed in libpcap 0.6; 0.4[.x] and 0.5[.x] versions of
libpcap have this bug, but 0.6[.x] and later versions don't.
<br>
Versions of WinPcap prior to 2.3 are based on pre-0.6 versions of
libpcap, and have this bug; WinPcap 2.3 is based on libpcap 0.6.2, and
doesn't have this bug.
<br>
If you are running Wireshark on a UNIX-flavored platform, run "wireshark
-v", or select "About Wireshark..." from the "Help" menu in Wireshark, to
see what version of libpcap it's using. If it's not 0.6 or later, you
will need either to upgrade your OS to get a later version of libpcap,
or will need to build and install a later version of libpcap from <a
href="http://www.tcpdump.org/">the tcpdump.org Web site</a> and then
recompile Wireshark from source with that later version of libpcap.
<br>
If you are running Wireshark on Windows with a pre-2.3 version of
WinPcap, you will need to un-install WinPcap and then download and
install WinPcap 2.3.
""")
question("""
How can I capture packets with CRC errors?
""")
answer("""
Wireshark can capture only the packets that the packet capture library -
libpcap on UNIX-flavored OSes, and the WinPcap port to Windows of libpcap
on Windows - can capture, and libpcap/WinPcap can capture only the
packets that the OS's raw packet capture mechanism (or the WinPcap
driver, and the underlying OS networking code and network interface
drivers, on Windows) will allow it to capture.
<br>
Unless the OS always supplies packets with errors such as invalid CRCs
to the raw packet capture mechanism, or can be configured to do so,
Wireshark - and other
programs that capture raw packets, such as tcpdump - cannot capture
those packets. You will have to determine whether your OS needs to be
so configured and, if so, can be so configured, configure it if
necessary and possible, and make whatever changes to libpcap and the
packet capture program you're using are necessary, if any, to support
capturing those packets.
<br>
Most OSes probably do <strong>not</strong> support capturing packets
with invalid CRCs on Ethernet, and probably do not support it on most
other link-layer types. Some drivers on some OSes do support it, such
as some Ethernet drivers on FreeBSD; in those OSes, you might always get
those packets, or you might only get them if you capture in promiscuous
mode (you'd have to determine which is the case).
<br>
Note that libpcap does not currently supply to programs that use it an
indication of whether the packet's CRC was invalid (because the drivers
themselves do not supply that information to the raw packet capture
mechanism); therefore, Wireshark will not indicate which packets had CRC
errors unless the FCS was captured (see the next question) and you're
using Wireshark 0.9.15 and later, in which case Wireshark will check the
CRC and indicate whether it's correct or not.
""")
question("""
How can I capture entire frames, including the FCS?
""")
answer("""
Wireshark can only capture data that the packet capture library -
libpcap on UNIX-flavored OSes, and the WinPcap port to Windows of
libpcap on Windows - can capture, and libpcap/WinPcap can capture only
the data that the OS's raw packet capture mechanism (or the WinPcap
driver, and the underlying OS networking code and network interface
drivers, on Windows) will allow it to capture.
<br>
For any particular link-layer network type, unless the OS supplies the
FCS of a frame as part of the frame, or can be configured to do so,
Wireshark - and other programs that capture raw packets, such as tcpdump
- cannot capture the FCS of a frame. You will have to determine whether
your OS needs to be so configured and, if so, can be so configured,
configure it if necessary and possible, and make whatever changes to
libpcap and the packet capture program you're using are necessary, if
any, to support capturing the FCS of a frame.
<br>
Most OSes do <strong>not</strong> support capturing the FCS of a frame
on Ethernet, and probably do not support it on most other link-layer
types. Some drivers on some OSes do support it, such as some (all?)
Ethernet drivers on NetBSD and possibly the driver for Apple's gigabit
Ethernet interface in OS X; in those OSes, you might always get the
FCS, or you might only get the FCS if you capture in promiscuous mode
(you'd have to determine which is the case).
<br>
Versions of Wireshark prior to 0.9.15 will not treat an Ethernet FCS in a
captured packet as an FCS. 0.9.15 and later will attempt to determine
whether there's an FCS at the end of the frame and, if it thinks there
is, will display it as such, and will check whether it's the correct
CRC-32 value or not.
""")
question("""
I'm capturing packets on a machine on a VLAN; why don't the packets I'm
capturing have VLAN tags?
""")
answer("""
You might be capturing on what might be called a "VLAN interface" - the
way a particular OS makes VLANs plug into the networking stack might,
for example, be to have a network device object for the physical
interface, which takes VLAN packets, strips off the VLAN header and
constructs an Ethernet header, and passes that packet to an internal
network device object for the VLAN, which then passes the packets onto
various higher-level protocol implementations.
<br>
In order to see the raw Ethernet packets, rather than "de-VLANized"
packets, you would have to capture not on the virtual interface for the
VLAN, but on the interface corresponding to the physical network device,
if possible. See <a
href="https://wiki.wireshark.org/CaptureSetup/VLAN">the Wireshark Wiki
item on VLAN capturing</a> for details.
""")
question("""
Why does Wireshark hang after I stop a capture?
""")
answer("""
The most likely reason for this is that Wireshark is trying to look up an
IP address in the capture to convert it to a name (so that, for example,
it can display the name in the source address or destination address
columns), and that lookup process is taking a very long time.
<br>
Wireshark calls a routine in the OS of the machine on which it's running
to convert IP addresses to the corresponding names. That routine
probably does one or more of:
</p>
<ul><li>a search of a system file listing IP addresses and names;
<li>a lookup using DNS;
<li>on UNIX systems, a lookup using NIS;
<li>on Windows systems, a NetBIOS-over-TCP query.
</ul>
<p>
If a DNS server that's used in an address lookup is not responding, the
lookup will fail, but will only fail after a timeout while the system
routine waits for a reply.
<br>
In addition, on Windows systems, if the DNS lookup of the address fails,
either because the server isn't responding or because there are no
records in the DNS that could be used to map the address to a name, a
NetBIOS-over-TCP query will be made. That query involves sending a
message to the NetBIOS-over-TCP name service on that machine, asking for
the name and other information about the machine. If the machine isn't
running software that responds to those queries - for example, many
non-Windows machines wouldn't be running that software - the lookup will
only fail after a timeout. Those timeouts can cause the lookup to take
a long time.
<br>
If you disable network address-to-name translation - for example, by
turning off the "Enable network name resolution" option in the "Capture
Options" dialog box for starting a network capture - the lookups of the
address won't be done, which may speed up the process of reading the
capture file after the capture is stopped. You can make that setting
the default by selecting "Preferences" from the "Edit" menu, turning off
the "Enable network name resolution" option in the "Name resolution"
options in the preferences dialog box, and using the "Save" button in
that dialog box; note that this will save <em>all</em> your current
preference settings.
<br>
If Wireshark hangs when reading a capture even with network name
resolution turned off, there might, for example, be a bug in one of
Wireshark's dissectors for a protocol causing it to loop infinitely. If
you're not running the most recent release of Wireshark, you should first
upgrade to that release, as, if there's a bug of that sort, it might've
been fixed in a release after the one you're running. If the hang
occurs in the most recent release of Wireshark, the bug should be
reported to <a href="mailto:[email protected]">the Wireshark
developers' mailing list</a> at <code>[email protected]</code>.
<br>
On UNIX-flavored OSes, please try to force Wireshark to dump core, by
sending it a <code>SIGABRT</code> signal (usually signal 6) with the
<code>kill</code> command, and then get a stack trace if you have a debugger
installed. A stack trace can be obtained by using your debugger
(<code>gdb</code> in this example), the Wireshark binary, and the resulting
core file. Here's an example of how to use the gdb command
<code>backtrace</code> to do so.
</p>
<pre>
$ gdb wireshark core
(gdb) backtrace
..... prints the stack trace
(gdb) quit
$
</pre>
<p>
The core dump file may be named "wireshark.core" rather than "core" on
some platforms (e.g., BSD systems).
<br>
Also, if at all possible, please send a copy of the capture file that caused
the problem. When capturing packets, Wireshark normally writes captured
packets to a temporary file, which will probably be in <code>/tmp</code> or
<code>/var/tmp</code> on UNIX-flavored OSes, <code>\\TEMP</code> on the main system disk
(normally <code>\\Documents and Settings\\</code><var>your login name</var>
<code>\\Local Settings\\Temp</code> on the main system disk on Windows
Windows XP and Server 2003, and
<code>\\Users\\<var>your login name</var>\\AppData\\Local\\Temp</code> on the main
system disk on Windows Vista and later, so the capture file will probably be there. If you
are capturing on a single interface, it will have a name of the form,
<code>wireshark_&lt;fmt&gt;_&lt;iface&gt;_YYYYmmddHHMMSS_XXXXXX</code>, where
&lt;fmt&gt; is the capture file format (pcap or pcapng), and &lt;iface&gt; is
the actual name of the interface you are capturing on; otherwise, if you are
capturing on multiple interfaces, it will have a name of the form,
<code>wireshark_&lt;N&gt;_interfaces_YYYYmmddHHMMSS_XXXXXX</code>, where &lt;N&gt;
is the number of simultaneous interfaces you are capturing on. Please don't
send a trace file greater than 1 MB when compressed; instead, make it available
via FTP or HTTP, or say it's available but leave it up to a developer to ask
for it. If the trace file contains sensitive information (e.g., passwords),
then please do not send it.
""")
#################################################################
section("Capturing packets on Windows")
#################################################################
question("""
I'm running Wireshark on Windows; why does some network interface on my
machine not show up in the list of interfaces in the "Interface:" field
in the dialog box popped up by "Capture->Start", and/or why does
Wireshark give me an error if I try to capture on that interface?
""", "capprobwin")
answer("""
If you are running Wireshark on Windows XP
or Windows Server 2003, and this is the first time you have run a
WinPcap-based program (such as Wireshark, or TShark, or WinDump, or
Analyzer, or...) since the machine was rebooted, you need to run that
program from an account with administrator privileges; once you have run
such a program, you will not need administrator privileges to run any
such programs until you reboot.
<br>
If you are running on Windows XP or Windows Server
2003 and have administrator privileges or a WinPcap-based program has
been run with those privileges since the machine rebooted, this problem
<em>might</em> clear up if you completely un-install WinPcap and then
re-install it.
<br>
If that doesn't work, then note that Wireshark relies on the WinPcap
library, on the WinPcap device driver, and on the facilities that come
with the OS on which it's running in order to do captures.
<br>
Therefore, if the OS, the WinPcap library, or the WinPcap driver don't
support capturing on a particular network interface device, Wireshark
won't be able to capture on that device.
<br>
WinPcap 2.3 has problems supporting PPP WAN interfaces on Windows NT
4.0, Windows 2000, Windows XP, and Windows Server 2003, and, to avoid
those problems, support for PPP WAN interfaces on those versions of
Windows has been disabled in WinPcap 3.0. Regular dial-up lines, ISDN
lines, ADSL connections using PPPoE or PPPoA, and various other lines
such as T1/E1 lines are all PPP interfaces, so those interfaces might
not show up on the list of interfaces in the "Capture Options"
dialog on those OSes.
<br>
On Windows 2000, Windows XP, and Windows Server 2003, but
<strong>not</strong> Windows NT 4.0 or Windows Vista Beta 1, you should
be able to capture on the "GenericDialupAdapter" with WinPcap 3.1. (3.1
beta releases called it the "NdisWanAdapter"; if you're using a 3.1 beta
release, you should un-install it and install the final 3.1 release.)
See <a href="https://wiki.wireshark.org/CaptureSetup/PPP">the Wireshark
Wiki item on PPP capturing</a> for details.
<br>
WinPcap prior to 3.0 does not support multiprocessor machines (note
that machines with a single multi-threaded processor, such as Intel's
new multi-threaded x86 processors, are multiprocessor machines as far as
the OS and WinPcap are concerned), and recent 2.x versions of WinPcap
refuse to operate if they detect that they're running on a
multiprocessor machine, which means that they may not show any network
interfaces. You will need to use WinPcap 3.0 to capture on a
multiprocessor machine.
<br>
If an interface doesn't show up in the list of interfaces in the
"Interface:" field, and you know the name of the interface, try entering
that name in the "Interface:" field and capturing on that device.
<br>
If the attempt to capture on it succeeds, the interface is somehow not
being reported by the mechanism Wireshark uses to get a list of
interfaces. Try listing the interfaces with WinDump; see <a
href="https://www.windump.org/">the WinDump Web site</a>
for information on using WinDump.
<br>
You would run WinDump with the <code>-D</code> flag; if it lists the
interface, please report this to <a
href="mailto:[email protected]">[email protected]</a>
giving full details of the problem, including
</p>
<ul>
<li>the operating system you're using, and the version of that operating
system;
<li>the type of network device you're using;
<li>the output of WinDump.
</ul>
<p>
If WinDump does <em>not</em> list the interface,
this is almost certainly a problem with one or more of:
</p>
<ul>
<li>the operating system you're using;
<li>the device driver for the interface you're using;
<li>the WinPcap library and/or the WinPcap device driver;
</ul>
<p>
so first check <a href="https://www.winpcap.org/misc/faq.htm">the
WinPcap FAQ</a> to see if your problem is mentioned there. If not, then see <a
href="https://www.winpcap.org/contact.htm">the WinPcap support page</a>
- check the "Submitting bugs" section.
<br>
If you are having trouble capturing on a particular network interface,
first try capturing on that device with WinDump; see <a
href="https://www.windump.org/">the WinDump Web site</a>
for information on using WinDump.
<br>
If you can capture on the interface with WinDump, send mail to <a
href="mailto:[email protected]">[email protected]</a>
giving full details of the problem, including
</p>
<ul>
<li>the operating system you're using, and the version of that operating
system;
<li>the type of network device you're using;
<li>the error message you get from Wireshark.
</ul>
<p>
If you <em>cannot</em> capture on the interface with WinDump,
this is almost certainly a problem with one or more of:
</p>
<ul>
<li>the operating system you're using;
<li>the device driver for the interface you're using;
<li>the WinPcap library and/or the WinPcap device driver;
</ul>
<p>
so first check <a href="https://www.winpcap.org/misc/faq.htm">the
WinPcap FAQ</a> to see if your problem is mentioned there. If not, then see <a
href="https://www.winpcap.org/contact.htm">the WinPcap support page</a>
- check the "Submitting bugs" section.
<br>
You may also want to ask the <a
href="mailto:[email protected]">[email protected]</a>
and the <a
href="mailto:[email protected]">[email protected]</a>
mailing lists to see if anybody happens to know about the problem and
knows of a workaround or fix. (Note that you will have to
subscribe to that list in order to be allowed to mail to it; see <a
href="https://www.winpcap.org/contact.htm">the WinPcap support
page</a> for information on the mailing list.) In your mail,
please give full details of the problem, as described above, and also
indicate that the problem occurs with WinDump, not just with Wireshark.
""")
question("""
I'm running Wireshark on Windows; why do no network interfaces show up in
the list of interfaces in the "Interface:" field in the dialog box
popped up by "Capture->Start"?
""")
answer("""
This is really <a href="#capprobwin">the same question as a previous
one</a>; see the response to that question.
""")
question("""
I'm running Wireshark on Windows; why doesn't my serial port/ADSL
modem/ISDN modem show up in the list of interfaces in the "Interface:"
field in the dialog box popped up by "Capture->Start"?
""")
answer("""
Internet access on those devices is often done with the Point-to-Point
(PPP) protocol; WinPcap 2.3 has problems supporting PPP WAN interfaces
on Windows NT 4.0, Windows 2000, Windows XP, and Windows Server 2003,
and, to avoid those problems, support for PPP WAN interfaces on those
versions of Windows has been disabled in WinPcap 3.0.
<br>
On Windows 2000, Windows XP, and Windows Server 2003, but
<strong>not</strong> Windows NT 4.0 or Windows Vista Beta 1, you should
be able to capture on the "GenericDialupAdapter" with WinPcap 3.1. (3.1
beta releases called it the "NdisWanAdapter"; if you're using a 3.1 beta
release, you should un-install it and install the final 3.1 release.)
See <a href="https://wiki.wireshark.org/CaptureSetup/PPP">the Wireshark
Wiki item on PPP capturing</a> for details.
""")
question("""
I'm running Wireshark on Windows NT 4.0/Windows 2000/Windows XP/Windows
Server 2003; my machine has a PPP (dial-up POTS, ISDN, etc.) interface,
and it shows up in the "Interface" item in the "Capture Options" dialog
box. Why can no packets be sent on or received from that network while
I'm trying to capture traffic on that interface?""", "nt_ppp_sniff")
answer("""
Some versions of WinPcap have problems with PPP WAN interfaces on
Windows NT 4.0, Windows 2000, Windows XP, and Windows Server 2003; one
symptom that may be seen is that attempts to capture in promiscuous mode
on the interface cause the interface to be incapable of sending or
receiving packets. You can disable promiscuous mode using the
<code>-p</code> command-line flag or the item in the "Capture Preferences"
dialog box, but this may mean that outgoing packets, or incoming
packets, won't be seen in the capture.
<br>
On Windows 2000, Windows XP, and Windows Server 2003, but
<strong>not</strong> Windows NT 4.0 or Windows Vista Beta 1, you should
be able to capture on the "GenericDialupAdapter" with WinPcap 3.1. (3.1
beta releases called it the "NdisWanAdapter"; if you're using a 3.1 beta
release, you should un-install it and install the final 3.1 release.)
See <a href="https://wiki.wireshark.org/CaptureSetup/PPP">the Wireshark
Wiki item on PPP capturing</a> for details.
""")
question("""
I'm running Wireshark on Windows; why am I not seeing any traffic being
sent by the machine running Wireshark?""")
answer("""
If you are running some form of VPN client software, it might be causing
this problem; people have seen this problem when they have Check Point's
VPN software installed on their machine. If that's the cause of the
problem, you will have to remove the VPN software in order to have
Wireshark (or any other application using WinPcap) see outgoing packets;
unfortunately, neither we nor the WinPcap developers know any way to
make WinPcap and the VPN software work well together.
<br>
Also, some drivers for Windows (especially some wireless network
interface drivers) apparently do not, when running in promiscuous mode,
arrange that outgoing packets are delivered to the software that
requested that the interface run promiscuously; try turning promiscuous
mode off.
""")
question("""
When I capture on Windows in promiscuous mode, I can see packets other
than those sent to or from my machine; however, those packets show up
with a "Short Frame" indication, unlike packets to or from my machine.
What should I do to arrange that I see those packets in their entirety?
""")
answer("""
In at least some cases, this appears to be the result of PGPnet running
on the network interface on which you're capturing; turn it off on that
interface.
""")
question("""
I'm trying to capture 802.11 traffic on Windows; why am I not seeing any
packets?
""", "win802_11promisc")
answer("""
At least some 802.11 card drivers on Windows appear not to see any
packets if they're running in promiscuous mode. Try turning promiscuous
mode off; you'll only be able to see packets sent by and received by
your machine, not third-party traffic, and it'll look like Ethernet
traffic and won't include any management or control frames, but that's a
limitation of the card drivers.
<br>
See the archived <a
href="https://web.archive.org/web/20090226193157/http://www.micro-logix.com/winpcap/Supported.asp">MicroLogix's
list of cards supported with WinPcap</a> for information on
support of various adapters and drivers with WinPcap.
""")
question("""
I'm trying to capture 802.11 traffic on Windows; why am I seeing packets
received by the machine on which I'm capturing traffic, but not packets
sent by that machine?
""")
answer("""
This appears to be another problem with promiscuous mode; try turning it
off.
""")
question("""
I'm trying to capture Ethernet VLAN traffic on Windows, and I'm
capturing on a "raw" Ethernet device rather than a "VLAN interface", so
that I can see the VLAN headers; why am I seeing packets received by the
machine on which I'm capturing traffic, but not packets sent by that
machine?
""")
answer("""
The way the Windows networking code works probably means that packets
are sent on a "VLAN interface" rather than the "raw" device, so packets
sent by the machine will only be seen when you capture on the "VLAN
interface". If so, you will be unable to see outgoing packets when
capturing on the "raw" device, so you are stuck with a choice between
seeing VLAN headers and seeing outgoing packets.
""")
#################################################################
section("Capturing packets on UN*Xes")
#################################################################
question("""
I'm running Wireshark on a UNIX-flavored OS; why does some network
interface on my machine not show up in the list of interfaces in the
"Interface:" field in the dialog box popped up by "Capture->Start",
and/or why does Wireshark give me an error if I try to capture on that
interface? """, "capprobunix")
answer("""
You may need to run Wireshark from an account with sufficient privileges
to capture packets, such as the super-user account, or may need to give
your account sufficient privileges to capture packets. Only those
interfaces that Wireshark can open for capturing show up in that list; if
you don't have sufficient privileges to capture on any interfaces, no
interfaces will show up in the list. See
<a href="https://wiki.wireshark.org/CaptureSetup/CapturePrivileges">the
Wireshark Wiki item on capture privileges</a> for details on how to give
a particular account or account group capture privileges on platforms
where that can be done.
<br>
If you are running Wireshark from an account with sufficient privileges,
then note that Wireshark relies on the libpcap library, and on the
facilities that come with the OS on which it's running in order to do
captures. On some OSes, those facilities aren't present by default; see
<a href="https://wiki.wireshark.org/CaptureSetup/CaptureSupport">the
Wireshark Wiki item on adding capture support</a> for details.
<br>
And, even if you're running with an account that has sufficient
privileges to capture, and capture support is present in your OS, if the
OS or the libpcap library don't support capturing on a particular
network interface device or particular types of devices, Wireshark won't
be able to capture on that device.
<br>
On Solaris, note that libpcap 0.6.2 and earlier didn't support Token
Ring interfaces; the current version, 0.7.2, does support Token Ring,
and the current version of Wireshark works with libpcap 0.7.2 and later.
<br>
If an interface doesn't show up in the list of interfaces in the
"Interface:" field, and you know the name of the interface, try entering
that name in the "Interface:" field and capturing on that device.
<br>
If the attempt to capture on it succeeds, the interface is somehow not
being reported by the mechanism Wireshark uses to get a list of
interfaces; please report this to <a
href="mailto:[email protected]">[email protected]</a>
giving full details of the problem, including
</p>
<ul>
<li>the operating system you're using, and the version of that operating
system (for Linux, give both the version number of the kernel and the
name and version number of the distribution you're using);
<li>the type of network device you're using.
</ul>
<p>
If you are having trouble capturing on a particular network interface,
and you've made sure that (on platforms that require it) you've arranged
that packet capture support is present, as per the above, first try
capturing on that device with <code>tcpdump</code>.
<br>
If you can capture on the interface with <code>tcpdump</code>, send mail to
<a
href="mailto:[email protected]">[email protected]</a>
giving full details of the problem, including
</p>
<ul>
<li>the operating system you're using, and the version of that operating
system (for Linux, give both the version number of the kernel and the
name and version number of the distribution you're using);
<li>the type of network device you're using;
<li>the error message you get from Wireshark.
</ul>
<p>
If you <em>cannot</em> capture on the interface with <code>tcpdump</code>,
this is almost certainly a problem with one or more of:
</p>
<ul>
<li>the operating system you're using;
<li>the device driver for the interface you're using;
<li>the libpcap library;
</ul>
<p>
so you should report the problem to the company or organization that
produces the OS (in the case of a Linux distribution, report the problem
to whoever produces the distribution).
<br>
You may also want to ask the <a
href="mailto:[email protected]">[email protected]</a>
and the <a
href="mailto:[email protected]">[email protected]</a>
mailing lists to see if anybody happens to know about the problem and
knows of a workaround or fix. In your mail, please give
full details of the problem, as described above, and also indicate that
the problem occurs with <code>tcpdump</code>, not just with Wireshark.
""")
question("""
I'm running Wireshark on a UNIX-flavored OS; why do no network interfaces
show up in the list of interfaces in the "Interface:" field in the
dialog box popped up by "Capture->Start"?
""")
answer("""
This is really <a href="#capprobunix">the same question as the previous
one</a>; see the response to that question.
""")
question("""I'm capturing packets on Linux; why do the time stamps have
only 100ms resolution, rather than 1us resolution?""")
answer("""
Wireshark gets time stamps from libpcap/WinPcap, and
libpcap/WinPcap get them from the OS kernel, so Wireshark - and any other
program using libpcap, such as tcpdump - is at the mercy of the time
stamping code in the OS for time stamps.
<br>
At least on x86-based machines, Linux can get high-resolution time
stamps on newer processors with the Time Stamp Counter (TSC) register;
for example, Intel x86 processors, starting with the Pentium Pro, and
including all x86 processors since then, have had a TSC, and other
vendors probably added the TSC at some point to their families of x86
processors.
The Linux kernel must be configured with the CONFIG_X86_TSC option
enabled in order to use the TSC. Make sure this option is enabled in
your kernel.
<br>
In addition, some Linux distributions may have bugs in their versions of
the kernel that cause packets not to be given high-resolution time
stamps even if the TSC is enabled. See, for example, bug 61111 for Red
Hat Linux 7.2. If your distribution has a bug such as this, you may
have to run a standard kernel from kernel.org in order to get
high-resolution time stamps.
""")
#################################################################
section("Capturing packets on wireless LANs")
#################################################################
question("""
How can I capture raw 802.11 frames, including non-data (management,
beacon) frames?
""", "raw_80211_sniff")
answer("""
That depends on the operating system on which you're running, and on the
802.11 interface on which you're capturing.
<br>
This would probably require that you capture in promiscuous mode or in
the mode called "monitor mode" or "RFMON mode". On some platforms, or
with some cards, this might require that you capture in monitor mode -
promiscuous mode might not be sufficient. If you want to capture
traffic on networks other than the one with which you're associated, you
will have to capture in monitor mode.
<br>
Not all operating systems support capturing non-data packets and, even
on operating systems that do support it, not all drivers, and thus not
all interfaces, support it. Even on those that do, monitor mode might
not be supported by the operating system or by the drivers for all
interfaces.
<br>
<strong>NOTE:</strong> an interface running in monitor mode will, on
most if not all platforms, not be able to act as a regular network
interface; putting it into monitor mode will, in effect, take your
machine off of whatever network it's on as long as the interface is in
monitor mode, allowing it only to passively capture packets.
<br>
This means that you should disable name resolution when capturing in
monitor mode; otherwise, when Wireshark (or TShark, or tcpdump) tries
to display IP addresses as host names, it will probably block for a long
time trying to resolve the name because it will not be able to
communicate with any DNS or NIS servers.
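<br>
If your version of TShark and libpcap support it, monitor mode and disabling
name resolution can both be requested from the command line; for example (the
interface name here is just a placeholder):
</p>
<pre>
$ tshark -I -i wlan0 -n
</pre>
<p>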
<br>
See <a
href="https://wiki.wireshark.org/CaptureSetup/WLAN">the Wireshark
Wiki item on 802.11 capturing</a> for details.
""")
question("""
How do I capture on an 802.11 device in monitor mode?""",
"monitor")
answer("""
Whether you will be able to capture in monitor mode depends on the
operating system, adapter, and driver you're using.
See <a href="#raw_80211_sniff">the previous question</a> for information
on monitor mode, including a link to the Wireshark Wiki page that gives
details on 802.11 capturing.
""")
#################################################################
section("Viewing traffic")
#################################################################
question("Why am I seeing lots of packets with incorrect TCP checksums?")
answer("""
If the packets that have incorrect TCP checksums are all being sent by
the machine on which Wireshark is running, this is probably because the
network interface on which you're capturing does TCP checksum
offloading. That means that the TCP checksum is added to the packet by
the network interface, not by the OS's TCP/IP stack; when capturing on
an interface, packets being sent by the host on which you're capturing
are directly handed to the capture mechanism by the OS, which means that
they are handed to the capture mechanism without a TCP checksum being
added to them.
<br>
The only way to prevent this from happening would be to disable TCP
checksum offloading, but
</p>
<ol>
<li>that might not even be possible on some OSes;
<li>that could reduce networking performance significantly.
</ol>
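<p>
(On Linux, for example, transmit checksum offloading can often be turned off
with <code>ethtool</code>; the interface name below is just a placeholder,
and not all drivers support this.)
</p>
<pre>
# ethtool -K eth0 tx off
</pre>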
<p>
However, you can disable the check that Wireshark does of the TCP
checksum, so that it won't report any packets as having TCP checksum
errors, and so that it won't refuse to do TCP reassembly due to a packet
having an incorrect TCP checksum. That can be set as a Wireshark
preference by selecting "Preferences" from the "Edit" menu, opening up
the "Protocols" list in the left-hand pane of the "Preferences" dialog
box, selecting "TCP" from that list, turning off the "Check the
validity of the TCP checksum when possible" option, clicking "Save" if
you want to save that setting in your preference file, and clicking
"OK".
<br>
It can also be set on the Wireshark or TShark command line with a
<code>-o tcp.check_checksum:false</code> command-line flag, or manually set
in your preferences file by adding a <code>tcp.check_checksum:false</code>
line.
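<br>
For example, to read an existing capture (the file name is just a
placeholder) without checking TCP checksums:
</p>
<pre>
$ tshark -o tcp.check_checksum:false -r capture.pcap
</pre>
<p>
The same <code>-o</code> option can be given to Wireshark itself.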
""")
question("""
I've just installed Wireshark, and the traffic on my local LAN
is boring. Where can I find more interesting captures?
""")
answer("""
We have a collection of strange and exotic sample capture
files at %s""" % (selflink("https://wiki.wireshark.org/SampleCaptures")))
question("""
Why doesn't Wireshark correctly identify RTP packets? It shows them
only as UDP.""")
answer("""
Wireshark can identify a UDP datagram as containing a packet of a
particular protocol running atop UDP only if
</p>
<ol>
<li> The protocol in question has a particular standard port
number, and the UDP source or destination port number is that port
<li> Packets of that protocol can be identified by looking for a
"signature" of some type in the packet - i.e., some data
that, if Wireshark finds it in some particular part of a
packet, means that the packet is almost certainly a packet of
that type.
<li> Some <em>other</em> traffic earlier in the capture indicated that,
for example, UDP traffic between two particular addresses and
ports will be RTP traffic.
</ol>
<p>
RTP doesn't have a standard port number, so 1) doesn't work; it doesn't,
as far as I know, have any "signature", so 2) doesn't work.
<br>
That leaves 3). If there's RTSP traffic that sets up an RTP session,
then, at least in some cases, the RTSP dissector will set things up so
that subsequent RTP traffic will be identified. Currently, that's the
only place we do that; there may be other places.
<br>
However, there will always be places where Wireshark is simply
<b>incapable</b> of deducing that a given UDP flow is RTP; a mechanism
would be needed to allow the user to specify that a given conversation
should be treated as RTP. As of Wireshark 0.8.16, such a mechanism
exists; if you select a UDP or TCP packet, the right mouse button menu
will have a "Decode As..." menu item, which will pop up a dialog box
letting you specify that the source port, the destination port, or both
the source and destination ports of the packet should be dissected as
some particular protocol.
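<br>
TShark's <code>-d</code> option can do something similar from the command
line; for example (the port number and file name are just illustrations):
</p>
<pre>
$ tshark -d udp.port==5004,rtp -r capture.pcap
</pre>
<p>
tells TShark to dissect UDP traffic on that port as RTP.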
""")
question("""
Why doesn't Wireshark show Yahoo Messenger packets in captures that
contain Yahoo Messenger traffic?""")
answer("""
Wireshark only recognizes as Yahoo Messenger traffic packets to or from TCP
port 3050 that begin with "YPNS", "YHOO", or "YMSG". TCP segments that
start with the middle of a Yahoo Messenger packet that takes more than one
TCP segment will not be recognized as Yahoo Messenger packets (even if the
TCP segment also contains the beginning of another Yahoo Messenger
packet).
""")
#################################################################
section("Filtering traffic")
#################################################################
question("""I saved a filter and tried to use its name to filter the
display; why do I get an "Unexpected end of filter string" error?""")
answer("""
You cannot use the name of a saved display filter as a filter. To
filter the display, you can enter a display filter expression -
<strong>not</strong> the name of a saved display filter - in the
"Filter:" box at the bottom of the display, and type the <Enter> key or
press the "Apply" button (that does not require you to have a saved
filter), or, if you want to use a saved filter, you can press the
"Filter:" button, select the filter in the dialog box that pops up, and
press the "OK" button.""")
question("""
How can I search for, or filter, packets that have a particular string
anywhere in them?
""")
answer("""
If you want to do this when capturing, you can't. That's a feature that
would be hard to implement in capture filters without changes to the
capture filter code, which, on many platforms, is in the OS kernel and,
on other platforms, is in the libpcap library.
<br>
After capture, you can search for text by selecting <i>Edit→Find
Packet...</i> and making sure <i>String</i> is selected. Alternatively, you can
use the "contains" display filter operator or "matches" operator if it's
supported on your system.
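<br>
For example, a display filter such as the following (the search string is
just an illustration) will match frames that contain that text anywhere in
their data:
</p>
<pre>
frame contains "password"
</pre>
<p>
The "matches" operator works similarly but takes a regular expression.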
""")
question("""
How do I filter a capture to see traffic for virus XXX?
""")
answer("""
For some viruses/worms there might be a capture filter to recognize the
virus traffic. Check the <a
href="https://wiki.wireshark.org/CaptureFilters">CaptureFilters</a> page
on the <a href="https://wiki.wireshark.org/">Wireshark Wiki</a> to see if
anybody's added such a filter.
<br>
Note that Wireshark was not designed to be an intrusion detection system;
you might be able to use it as an IDS, but in most cases software
designed to be an IDS, such as <a href="https://www.snort.org/">Snort</a>
or <a href="https://www.prelude-siem.org/">Prelude</a>, will probably work
better.
""")
#################################################################
if __name__ == '__main__':
sys.exit(main())
#################################################################
|
# -*- coding: utf-8 -*-
"""
@author: Austin Nicolai
"""
import sys
from sensor import Sensor
import math
from random import randint
class Rover_Sensor(Sensor):
def __init__(self, sector, location, rover_heading, observation_range, sensor_range, sensor_noise):
super(Rover_Sensor, self).__init__(sector, location, rover_heading, sensor_range, sensor_noise)
self.observation_range = observation_range
def getRoverCount(self, rover_list, min_observation_dist):
# determine the total rovers seen
rover_count = 0
# loop over all rovers
for rover in rover_list:
# determine the distance to the rover
distance = self.location - rover.location
# add sensor noise to the distance
random_noise = randint(-self.sensor_noise, self.sensor_noise)
distance = distance * (1. + random_noise/100.)
# determine the angle to the rover
dx = rover.location.x - self.location.x
dy = rover.location.y - self.location.y
if dx == 0: # exception for rovers that are on top of each other (or identical)
dx = sys.float_info.min
angle = math.atan2(dy, dx)
angle = angle * 180. / math.pi # convert to degrees
# ensure angle in range [0, 360]
if angle < 0:
angle += 360
# angle range is: [left_edge, right_edge)
            # if distance is 0, the rovers are on top of each other and can be seen:
if distance == 0:
sum_dist = max(distance**2, min_observation_dist**2)
rover_count += (1./sum_dist)
# if angle range straddles 0:
elif (self.left_edge < 90) and (self.right_edge > 270):
if (distance <= self.sensor_range) and ((0 <= angle <= self.left_edge) or (360 > angle > self.right_edge)):
sum_dist = max(distance**2, min_observation_dist**2)
rover_count += (1./sum_dist)
# if angle range is typical:
elif (distance <= self.sensor_range) and (self.right_edge < angle <= self.left_edge):
sum_dist = max(distance**2, min_observation_dist**2)
rover_count += (1./sum_dist)
return rover_count
def getObservableRovers(self, rover_list):
# determine the observable rovers
rover_indices = []
# loop over all rovers
for rover in rover_list:
# determine the distance to the rover
distance = self.location - rover.location
# add sensor noise to the distance
distance = distance * (1. + self.sensor_noise/100.)
# determine the angle to the rover
dx = self.location.x - rover.location.x
dy = self.location.y - rover.location.y
if dx == 0:
dx = sys.float_info.min
angle = math.atan2(dy, dx)
angle = angle * 180. / math.pi # convert to degrees
# ensure angle in range [0, 360]
if angle < 0:
angle += 360
# angle range is: [left_edge, right_edge)
            # if distance is 0, the rovers are on top of each other and can be seen:
if distance == 0:
rover_indices.append(rover)
# if angle range straddles 0:
            elif (self.left_edge < 90) and (self.right_edge > 270) and (distance <= self.observation_range) and ((0 <= angle <= self.left_edge) or (360 > angle > self.right_edge)):
rover_indices.append(rover)
# if angle range is typical:
elif (distance <= self.observation_range) and (angle <= self.left_edge) and (angle > self.right_edge):
rover_indices.append(rover)
return rover_indices
|
# -*- coding: utf-8 -*-
import urlparse
from dirtyfields import DirtyFieldsMixin
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from django.contrib.contenttypes.fields import GenericRelation
from framework.postcommit_tasks.handlers import enqueue_postcommit_task
from framework.exceptions import PermissionsError
from osf.models import NodeLog, Subject
from osf.models.validators import validate_subject_hierarchy
from osf.utils.fields import NonNaiveDateTimeField
from website.preprints.tasks import on_preprint_updated, get_and_set_preprint_identifiers
from website.project.licenses import set_license
from website.util import api_v2_url
from website.util.permissions import ADMIN
from website import settings
from reviews.models.mixins import ReviewableMixin
from osf.models.base import BaseModel, GuidMixin
from osf.models.identifiers import IdentifierMixin, Identifier
class PreprintService(DirtyFieldsMixin, GuidMixin, IdentifierMixin, ReviewableMixin, BaseModel):
date_created = NonNaiveDateTimeField(auto_now_add=True)
date_modified = NonNaiveDateTimeField(auto_now=True)
provider = models.ForeignKey('osf.PreprintProvider',
on_delete=models.SET_NULL,
related_name='preprint_services',
null=True, blank=True, db_index=True)
node = models.ForeignKey('osf.AbstractNode', on_delete=models.SET_NULL,
related_name='preprints',
null=True, blank=True, db_index=True)
is_published = models.BooleanField(default=False, db_index=True)
date_published = NonNaiveDateTimeField(null=True, blank=True)
license = models.ForeignKey('osf.NodeLicenseRecord',
on_delete=models.SET_NULL, null=True, blank=True)
subjects = models.ManyToManyField(blank=True, to='osf.Subject', related_name='preprint_services')
identifiers = GenericRelation(Identifier, related_query_name='preprintservices')
class Meta:
unique_together = ('node', 'provider')
permissions = (
('view_preprintservice', 'Can view preprint service details in the admin app.'),
)
def __unicode__(self):
return '{} preprint (guid={}) of {}'.format('published' if self.is_published else 'unpublished', self._id, self.node.__unicode__())
@property
def _verified_publishable(self):
return self.is_published and self.node.is_preprint and not self.node.is_deleted
@property
def primary_file(self):
if not self.node:
return
return self.node.preprint_file
@property
def article_doi(self):
if not self.node:
return
return self.node.preprint_article_doi
@property
def preprint_doi(self):
return self.get_identifier_value('doi')
@property
def is_preprint_orphan(self):
if not self.node:
return
return self.node.is_preprint_orphan
@cached_property
def subject_hierarchy(self):
return [
s.object_hierarchy for s in self.subjects.exclude(children__in=self.subjects.all())
]
@property
def deep_url(self):
# Required for GUID routing
return '/preprints/{}/'.format(self._primary_key)
@property
def url(self):
if (self.provider.domain_redirect_enabled and self.provider.domain) or self.provider._id == 'osf':
return '/{}/'.format(self._id)
return '/preprints/{}/{}/'.format(self.provider._id, self._id)
@property
def absolute_url(self):
return urlparse.urljoin(
self.provider.domain if self.provider.domain_redirect_enabled else settings.DOMAIN,
self.url
)
@property
def absolute_api_v2_url(self):
path = '/preprints/{}/'.format(self._id)
return api_v2_url(path)
def has_permission(self, *args, **kwargs):
return self.node.has_permission(*args, **kwargs)
def get_subjects(self):
ret = []
for subj_list in self.subject_hierarchy:
subj_hierarchy = []
for subj in subj_list:
if subj:
subj_hierarchy += ({'id': subj._id, 'text': subj.text}, )
if subj_hierarchy:
ret.append(subj_hierarchy)
return ret
def set_subjects(self, preprint_subjects, auth):
if not self.node.has_permission(auth.user, ADMIN):
raise PermissionsError('Only admins can change a preprint\'s subjects.')
old_subjects = list(self.subjects.values_list('id', flat=True))
self.subjects.clear()
for subj_list in preprint_subjects:
subj_hierarchy = []
for s in subj_list:
subj_hierarchy.append(s)
if subj_hierarchy:
validate_subject_hierarchy(subj_hierarchy)
for s_id in subj_hierarchy:
self.subjects.add(Subject.load(s_id))
self.save(old_subjects=old_subjects)
def set_primary_file(self, preprint_file, auth, save=False):
if not self.node.has_permission(auth.user, ADMIN):
raise PermissionsError('Only admins can change a preprint\'s primary file.')
if preprint_file.node != self.node or preprint_file.provider != 'osfstorage':
raise ValueError('This file is not a valid primary file for this preprint.')
existing_file = self.node.preprint_file
self.node.preprint_file = preprint_file
# only log if updating the preprint file, not adding for the first time
if existing_file:
self.node.add_log(
action=NodeLog.PREPRINT_FILE_UPDATED,
params={
'preprint': self._id
},
auth=auth,
save=False
)
if save:
self.save()
self.node.save()
def set_published(self, published, auth, save=False):
if not self.node.has_permission(auth.user, ADMIN):
raise PermissionsError('Only admins can publish a preprint.')
if self.is_published and not published:
raise ValueError('Cannot unpublish preprint.')
self.is_published = published
if published:
if not (self.node.preprint_file and self.node.preprint_file.node == self.node):
raise ValueError('Preprint node is not a valid preprint; cannot publish.')
if not self.provider:
raise ValueError('Preprint provider not specified; cannot publish.')
if not self.subjects.exists():
raise ValueError('Preprint must have at least one subject to be published.')
self.date_published = timezone.now()
self.node._has_abandoned_preprint = False
self.node.add_log(
action=NodeLog.PREPRINT_INITIATED,
params={
'preprint': self._id
},
auth=auth,
save=False,
)
if not self.node.is_public:
self.node.set_privacy(
self.node.PUBLIC,
auth=None,
log=True
)
            # This should be called after all fields for EZID metadata have been set
enqueue_postcommit_task(get_and_set_preprint_identifiers, (), {'preprint': self}, celery=True)
if save:
self.node.save()
self.save()
def set_preprint_license(self, license_detail, auth, save=False):
license_record, license_changed = set_license(self, license_detail, auth, node_type='preprint')
if license_changed:
self.node.add_log(
action=NodeLog.PREPRINT_LICENSE_UPDATED,
params={
'preprint': self._id,
'new_license': license_record.node_license.name
},
auth=auth,
save=False
)
if save:
self.save()
def set_identifier_values(self, doi, ark, save=False):
self.set_identifier_value('doi', doi)
self.set_identifier_value('ark', ark)
if save:
self.save()
def save(self, *args, **kwargs):
first_save = not bool(self.pk)
saved_fields = self.get_dirty_fields() or []
old_subjects = kwargs.pop('old_subjects', [])
ret = super(PreprintService, self).save(*args, **kwargs)
if (not first_save and 'is_published' in saved_fields) or self.is_published:
enqueue_postcommit_task(on_preprint_updated, (self._id,), {'old_subjects': old_subjects}, celery=True)
return ret
|
#!/usr/bin/env python
from utils import utils, inspector
from datetime import datetime
import urllib.parse
import logging
import re
archive = 2002
# options:
# standard since/year options for a year range to fetch from.
#
# component: limit to a specific component. See COMPONENTS dict at bottom.
# limit: only download X number of reports (per component)
# report_id: use in conjunction with 'component' to get only one report
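#
# Hypothetical invocation (the exact option syntax is handled by the shared
# "inspector"/"utils" helpers that parse the command line):
#   python dhs.py --component=tsa --limit=5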
def run(options):
year_range = inspector.year_range(options, archive)
component = options.get('component')
if component:
components = [component]
else:
components = sorted(COMPONENTS.keys())
report_id = options.get('report_id')
limit = int(options.get('limit', 0))
all_audit_reports = {}
for component in components:
logging.info("## Fetching reports for component %s" % component)
url = url_for(options, component)
doc = utils.beautifulsoup_from_url(url)
results = doc.select("#content-area tbody tr")
if not results:
raise inspector.NoReportsFoundError("DHS (%s)" % component)
count = 0
for result in results:
report = report_from(result, component, url)
if not report:
continue
if report_id and (report_id != report['report_id']):
continue
if inspector.year_from(report) not in year_range:
# logging.info("[%s] Skipping, not in requested range." % report['report_id'])
continue
key = (report["report_id"], report["title"])
if key in all_audit_reports:
all_audit_reports[key]["agency"] = "{}, {}".format(all_audit_reports[key]["agency"],
report["agency"])
all_audit_reports[key]["agency_name"] = "{}, {}".format(all_audit_reports[key]["agency_name"],
report["agency_name"])
else:
all_audit_reports[key] = report
count += 1
if limit and (count >= limit):
break
logging.info("## Fetched %i reports for component %s\n\n" % (count, component))
for report in all_audit_reports.values():
inspector.save_report(report)
PDF_DESCRIPTION_RE = re.compile("(.*)\\(PDF, [0-9]+ pages - [0-9.]+ ?[mMkK][bB]\\)")
def report_from(result, component, url):
report = {
'inspector': 'dhs',
'inspector_url': 'https://www.oig.dhs.gov/'
}
link = result.select("td")[1].select("a")[0]
href = link['href']
href = href.replace("/index.php/", "/")
report_url = urllib.parse.urljoin(url, href)
title = link.text.strip()
title = title.replace("\xa0", " ")
  title = title.replace("  ", " ")
title = title.replace(", , ", ", ")
title = title.rstrip("( ,.")
pdf_desc_match = PDF_DESCRIPTION_RE.match(title)
if pdf_desc_match:
title = pdf_desc_match.group(1)
title = title.rstrip("( ,.")
if title.endswith("(Redacted)"):
title = title[:-10]
if title.endswith("(SSI)"):
title = title[:-5]
title = title.rstrip("( ,.")
if title == "DHS' Counterintelligence Activities Summary":
title = "DHS' Counterintelligence Activities"
report['url'] = report_url
report['title'] = title
timestamp = result.select("td")[2].text.strip()
published_on = datetime.strptime(timestamp, "%m/%d/%Y")
report['published_on'] = datetime.strftime(published_on, "%Y-%m-%d")
report_id = result.select("td")[0].text.strip()
# Audit numbers are frequently reused, so add the year to our ID
report_id = "%s_%d" % (report_id, published_on.year)
report['report_id'] = report_id
# if component is a top-level DHS thing, file as 'dhs'
# otherwise, the component is the agency for our purposes
if component.startswith('dhs_'):
report['agency'] = 'dhs'
else:
report['agency'] = component
report['agency_name'] = COMPONENTS[component][1]
return report
def url_for(options, component):
return ("https://www.oig.dhs.gov/reports/audits-inspections-and-evaluations"
"?field_dhs_agency_target_id={}"
"&field_oversight_area=All"
"&field_fy_value=All".format(COMPONENTS[component][0]))
# Component handle, with associated ID query string param
# Not every component is an agency. Some of these will be collapsed into 'dhs'
# for a report's 'agency' field.
# Some additional info on DHS components: https://www.dhs.gov/department-components
COMPONENTS = {
'secret_service': (8, "U.S. Secret Service"),
'coast_guard': (7, "U.S. Coast Guard"),
'uscis': (6, "U.S. Citizenship and Immigration Services"),
'tsa': (5, "Transportation Security Administration"),
'ice': (3, "Immigration and Customs Enforcement"),
'fema': (2, "Federal Emergency Management Agency"),
'cbp': (1, "Customs & Border Protection"),
'dhs_other': (9, "Department of Homeland Security"),
'dhs_mgmt': (4, "Department of Homeland Security"),
}
utils.run(run) if (__name__ == "__main__") else None
|
# all the imports
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
# configuration
DATABASE = '/tmp/flaskr.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# create our little application
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
def init_db():
with closing(connect_db()) as db:
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
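# A minimal way to create the database once from a Python shell, assuming
# this module is importable as "flaskr" (the module name is an assumption):
#   >>> from flaskr import init_db
#   >>> init_db()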
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/')
def show_entries():
cur = g.db.execute('select title, text from entries order by id desc')
entries = [dict(title=row[0], text=row[1]) for row in cur.fetchall()]
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
g.db.execute('insert into entries(title, text) values(?, ?)',
[request.form['title'], request.form['text']])
g.db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
        if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run()
|
# This file is part of Rubber and thus covered by the GPL
# (c) Emmanuel Beffara, 2002--2006
# (c) Sebastian Kapfer 2015
# vim:et:ts=4
"""
Abstract class providing a common implementation for
rubber.latex_modules.dvips and rubber.latex_modules.dvipdfm.
PostScript/PDF generation through dvips/odvips/dvipdfm with Rubber.
When the name of the main compiler is "Omega" (instead of "TeX" for
instance), then "odvips" is used instead of "dvips".
"""
from rubber.util import _
import logging
msg = logging.getLogger (__name__)
import rubber.converters
import rubber.depend
import rubber.util
# FIXME: this class may probably be simplified a lot if inheriting
# from rubber.depend.Shell instead of rubber.depend.Node.
product_extension = { 'dvips':'ps', 'dvipdfm':'pdf' }
class Dvip_Tool_Dep_Node (rubber.depend.Node):
def __init__ (self, document, tool):
super ().__init__ ()
self.tool = tool
assert tool in ('dvipdfm', 'dvips')
self.doc = document
assert type (self.doc.env.final) is rubber.converters.latex.LaTeXDep
self.source = self.doc.env.final.primary_product ()
if not self.source.endswith ('.dvi'):
raise rubber.GenericError (_('Tool %s only produces DVI') % tool)
self.doc.env.final = self
self.add_product (self.source [:-3] + product_extension [tool])
self.add_source (self.source)
self.extra_args = []
self.delegate_commands_to = self
def do_options (self, args):
self.extra_args.extend (args)
def run (self):
# build command line
tool = self.tool
if tool == 'dvips' and self.doc.engine == 'Omega':
tool = 'odvips'
cmd = [ tool ]
cmd.extend (self.extra_args)
cmd.append (self.source)
# run
if rubber.util.execute (cmd) != 0:
msg.error (_('%s failed on %s') % (tool, self.source))
return False
return True
|
from random import randint
import const
class BasePlayer:
def __init__(self):
"""
Initialise the boards: player to empty, opponent to unknown.
"""
self._playerName = "Unknown"
self._playerDescription = "None"
def getName(self):
return self._playerName
def getDescription(self):
return self._playerDescription
def _initBoards(self):
"""
The boards are stored in a "jagged" 2 dimensional list
Example: to access the opponent at position B6 use Opponent[1][5]
(Remember python indexes from 0)
The following convention is used for storing the state of a square:
Unknown = 0
Empty = 1
Occupied = 2
Missed = 3
Hit = 4 (player or opponent)
Initially, the player's board is all
empty, the opponent's is all unknown.
"""
self._playerBoard = [[const.EMPTY] * (6 if x < 6 else 12)
for x in range(12)]
        self._opponenBoard = [[const.UNKNOWN] * (6 if x < 6 else 12)  # opponent starts unknown, per the convention above
for x in range(12)]
def deployFleet(self):
"""
Decide where you want your fleet to
be deployed, then return your board.
"""
pass
def chooseMove(self):
"""
Decide what move to make based on current
state of opponent's board and return it.
"""
pass
def setOutcome(self, entry, i1, i2):
"""
Read the outcome of the shot from the keyboard expected
value is const.HIT for hit and const.MISSED for missed.
"""
pass
def getOpponentMove(self, i1, i2):
"""
You might like to keep track of where your
opponent has missed, but here we just acknowledge it.
"""
pass
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import gplearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Py 2.6
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object used to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
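# Minimal usage sketch for assert_warns (the warning message is arbitrary):
#   assert_warns(UserWarning, warnings.warn, "something changed")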
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object used to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [warning.category is warning_class for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>."
% (message, func.__name__))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
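# A minimal usage sketch (illustrative only; the dataset name and columns below
# are hypothetical):
#     install_mldata_mock({'mock': {'data': np.arange(6).reshape(2, 3),
#                                   'label': np.array([0, 1])}})
#     try:
#         ...  # exercise code that calls fetch_mldata('mock')
#     finally:
#         uninstall_mldata_mock()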
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that cannot be default-constructed sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV", "StandardScaler"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV.
include_dont_test : boolean, default=False
        Whether to include "special" label estimators or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = gplearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='gplearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
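# Illustrative usage (not part of the original helpers): list classifier names.
#     for name, Estimator in all_estimators(type_filter='classifier'):
#         print(name)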
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
|
#!/usr/bin/python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from builtins import input
from http.server import HTTPServer, SimpleHTTPRequestHandler
from netaddr import IPNetwork
from os import chdir
from pyroute2 import IPRoute, NetNS, IPDB, NSPopen
from random import choice, randint
from simulation import Simulation
from socket import htons
from threading import Thread
import sys
ipr = IPRoute()
ipdb = IPDB(nl=ipr)
num_hosts = 9
num_vnis = 4
null = open("/dev/null", "w")
class TunnelSimulation(Simulation):
def __init__(self, ipdb):
super(TunnelSimulation, self).__init__(ipdb)
self.available_ips = [list(IPNetwork("192.168.%d.0/24" % i)[1:-1])
for i in range(0, num_vnis)]
def start(self):
# each entry is tuple of ns_ipdb, out_ifc, in_ifc
host_info = []
for i in range(0, num_hosts):
print("Launching host %i of %i" % (i + 1, num_hosts))
ipaddr = "172.16.1.%d/24" % (100 + i)
host_info.append(self._create_ns("host%d" % i, ipaddr=ipaddr))
with self.ipdb.create(ifname="br100", kind="bridge") as br100:
for host in host_info: br100.add_port(host[1])
br100.up()
# create a vxlan device inside each namespace
for host in host_info:
print("Starting tunnel %i of %i" % (len(self.processes) + 1, num_hosts))
cmd = ["netserver", "-D"]
self.processes.append(NSPopen(host[0].nl.netns, cmd, stdout=null))
for i in range(0, num_vnis):
with host[0].create(ifname="vxlan%d" % i, kind="vxlan",
vxlan_id=10000 + i,
vxlan_link=host[0].interfaces.eth0,
vxlan_port=4789,
vxlan_group="239.1.1.%d" % (1 + i)) as vx:
vx.up()
with host[0].create(ifname="br%d" % i, kind="bridge") as br:
br.add_port(host[0].interfaces["vxlan%d" % i])
br.up()
with host[0].create(ifname="c%da" % i, kind="veth",
peer="c%db" % i) as c:
c.up()
c.add_ip("%s/24" % self.available_ips[i].pop(0))
c.mtu = 1450
br.add_port(host[0].interfaces["c%db" % i])
host[0].interfaces["c%db" % i].up().commit()
# pick one host to start the monitor in
host = host_info[0]
cmd = ["python", "monitor.py"]
p = NSPopen(host[0].nl.netns, cmd)
self.processes.append(p)
def serve_http(self):
chdir("chord-transitions")
# comment below line to see http server log messages
SimpleHTTPRequestHandler.log_message = lambda self, format, *args: None
self.srv = HTTPServer(("", 8080), SimpleHTTPRequestHandler)
self.t = Thread(target=self.srv.serve_forever)
self.t.setDaemon(True)
self.t.start()
print("HTTPServer listening on 0.0.0.0:8080")
try:
sim = TunnelSimulation(ipdb)
sim.start()
sim.serve_http()
input("Press enter to quit:")
finally:
if "br100" in ipdb.interfaces: ipdb.interfaces.br100.remove().commit()
sim.release()
ipdb.release()
null.close()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import jinja2
import json
import urllib
from wordJson import getWordJson
from wordJson import isValidWord
jj2env = jinja2.Environment(
loader = jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), 'partials')))
# load index of dictionary books
with open(os.path.join(os.path.dirname(__file__),
'json/books.json'), 'r') as f:
dicIndex = json.loads(f.read())
def isValidPrefixAndWord(prefix, word):
if (prefix == None):
if (word != None):
# prefix = None AND word != None
raise Exception("Impossible case: prefix = None AND word != None")
# prefix = None AND word = None
return True
# prefix != None, check prefix sanity
if prefix in [u'a', u'ā', u'b', u'c', u'd', u'ḍ', u'e', u'g', u'h', u'i', u'ī', u'j', u'k', u'l', u'ḷ', u'm', u'ŋ', u'n', u'ñ', u'ṅ', u'ṇ', u'o', u'p', u'r', u's', u't', u'ṭ', u'u', u'ū', u'v', u'y', u'-', u'°']:
# prefix != None AND prefix is valid
if (word == None):
# prefix != None AND prefix is valid AND word == None
return True
# prefix != None AND prefix is valid AND word != None
return isValidWord(word)
else:
# prefix != None AND prefix is invalid
return False
raise Exception("Impossible case: End of isValidPrefixOrWord!")
def getPrefixHtml(prefix):
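    # urllib.quote percent-encodes the (possibly non-ASCII) prefix; '%' is then
    # replaced with 'Z' so the result is a legal static file name on GAE.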
legalNameOnGAE = urllib.quote(
(u'prefixWordsHtml/%s.html' % prefix).encode('utf-8')
).replace('%', 'Z')
path = os.path.join(os.path.dirname(__file__), legalNameOnGAE)
with open(path, 'r') as f:
return f.read().decode('utf-8')
def getWordHtml(prefix, word):
template = jj2env.get_template('word2.html')
return template.render({'bookExps': json.loads(getWordJson(word)),
'booksIndex': dicIndex,
'word': word})
def getHtmlTitle(userLocale, reqHandlerName, i18n, prefix, word):
if reqHandlerName == 'WordPage':
return word + u' - ' + i18n.ugettext(u'Definition and Meaning') + u' - '
elif reqHandlerName == 'PrefixPage':
return i18n.ugettext(u'Words Start with') + u' ' + prefix + u' - '
else:
return ''
|
import time
class AP(object):
CRYPTO_OPEN = 0
CRYPTO_WEP = 1
CRYPTO_WPA2 = 2
CRYPTO_UNKNOWN = 3
def __init__(self, ssid, bssid, crypto):
self.ssid = ssid
self.bssid = bssid
self.crypto = crypto
def __eq__(self, ap2):
return self.bssid == ap2.bssid and self.ssid == ap2.ssid
def __str__(self):
return "AP: %s/%s" % (self.ssid, self.bssid)
def __repr__(self):
return str(self)
class VisibleSignal(object):
" There's an os specific abstraction for each Networking implementation "
def __init__(self, ap, strength = None):
self.ap = ap
self.strength = strength
def connect(self, using_password = None):
""" Will try to connect to the signal using the suplied password.
If password is not supplied, all ap.possible_passwords will be tried
(and reported on)"""
raise Exception, "Subclass responsibility"
def is_connected(self):
" Answers whether this signal is currently connected or not "
raise Exception, "Subclass responsibility"
class Configuration(object):
def __init__(self, ap, passwords = None):
self.ap = ap
self.passwords = passwords
class Report(object):
TYPE_SUCCESS = 0
TYPE_FAILURE = 1
TYPE_IMPORTED = 2
TYPE_UNKNOWN = 3
def __init__(self, ap, password = None, reporting = TYPE_UNKNOWN):
self.ap = ap
self.password = password
self.reporting = reporting
self.time_stamp = time.gmtime()
class Location(object):
def __init__(self, lat = None, _long = None):
self.lat = lat
self.long = _long
def is_unknown(self):
return self.lat is None or self.long is None
|
import json
import gzip
import lzma
import os
import re
import arrow
try:
import ujson as fast_json
except ImportError:
fast_json = None
def decompress_open(path):
extension = os.path.splitext(path)[1]
if extension in ('.log', '.txt'):
return open
elif extension == '.xz':
return lzma.open
elif extension == '.gz':
return gzip.open
def read(path):
filename = os.path.basename(path)
match = re.match(r'log-(\w+)-', filename)
project = match.group(1)
if fast_json:
fast_loads = fast_json.loads
else:
fast_loads = None
norm_loads = json.loads
with decompress_open(path)(path, mode='rt') as file:
for line in file:
if fast_loads:
try:
doc = fast_loads(line)
except ValueError:
# May be an overflow
doc = norm_loads(line)
else:
doc = norm_loads(line)
item = doc['item']
nickname = doc['by']
date = arrow.get(doc['at']).datetime
size = sum(doc['bytes'].values())
yield project, item, nickname, date, size
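# Illustrative usage (the file name below is hypothetical):
#     for project, item, nickname, date, size in read('log-acme-20150101.txt.gz'):
#         print(project, item, size)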
|
import unittest
from biicode.common.edition.hive import Hive
from biicode.common.edition.hive_holder import HiveHolder
from biicode.common.model.symbolic.block_version import BlockVersion
from biicode.common.model.symbolic.block_version_table import BlockVersionTable
from biicode.common.model.resource import Resource
from biicode.common.model.cells import SimpleCell
from biicode.common.model.content import Content
from biicode.common.model.blob import Blob
from biicode.common.edition.block_holder import BlockHolder, BIICODE_FILE
a1 = BlockVersion.loads('user0/blocka: 1')
an = BlockVersion.loads('user0/blocka')
b2 = BlockVersion.loads('user0/blockb(branch): 2')
bn = BlockVersion.loads('user0/blockb(branch)')
cn = BlockVersion.loads('user0/blockc: -1')
class HiveHolderTest(unittest.TestCase):
def base_version_test(self):
hive = Hive()
hive_holder = HiveHolder(hive, {}, {})
parents_resource = Resource(SimpleCell(a1.block_name + BIICODE_FILE),
Content(id_=None, load=Blob('[parent]\n ' + str(a1))))
hive_holder.add_holder(BlockHolder(a1.block_name, {parents_resource}))
parents_resource = Resource(SimpleCell(b2.block_name + BIICODE_FILE),
Content(id_=None, load=Blob('[parent]\n * ' + str(b2))))
hive_holder.add_holder(BlockHolder(b2.block_name, {parents_resource}))
hive_holder.add_holder(BlockHolder(cn.block_name, {}))
result_table = BlockVersionTable([b.parent for b in hive_holder.block_holders])
self.assertEqual(result_table, BlockVersionTable([a1, b2, cn]))
|
from conans import ConanFile, CMake, tools
import shutil
class LibDisasmConan(ConanFile):
name = "libdisasm"
version = "0.23"
license = "Clarified Artistic License"
description = "An basic x86 disassembler in library form."
topics = ("libdisasm", "disasm")
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = {"shared": False}
generators = "cmake"
exports_sources = [ "CMakeLists.txt", "sizeofvoid.patch" ]
def source(self):
tools.download("https://sourceforge.net/projects/bastard/files/libdisasm/{0}/libdisasm-{0}.tar.gz/download".format(self.version),
"libdisasm-{}.tar.gz".format(self.version))
tools.untargz("libdisasm-{}.tar.gz".format(self.version))
tools.patch(patch_file="sizeofvoid.patch",
base_path="libdisasm-{}".format(self.version))
shutil.move("CMakeLists.txt", "libdisasm-{}/".format(self.version))
def get_env(self):
cmake = CMake(self)
cmake.configure(source_folder="libdisasm-{}".format(self.version))
return cmake
def build(self):
cmake = self.get_env()
cmake.build()
def package(self):
cmake = self.get_env()
cmake.install()
def package_info(self):
self.cpp_info.libs = ["disasm"]
self.cpp_info.includedirs = ["include"]
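# A minimal local-build sketch with the Conan 1.x CLI (the user/channel below is
# illustrative); run from the recipe directory:
#     conan create . user/testing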
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
A script that creates a track-driven SDF model for Gazebo.
To alter the dimensions of the robot, edit the file settings.config.
minimum_pads is essential for tuning the model and for detecting collision problems with the drums.
@author: Aris Synodinos
"""
import lxml.etree as ltr
import decimal
from gazebo_sdf import *
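# settings.config is expected to contain plain "name = value" lines that are
# executed as Python assignments, e.g. (illustrative values only):
#     drum_radius = 0.1
#     drum_separation = 0.5
#     output = 'track.sdf'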
file = open('settings.config','r')
for line in file:
if '\n' == line[-1]:
line = line[:-1]
exec(line.split(' = ',2)[0] + ' = ' + line.split(' = ',2)[1])
file.close()
[alpha,num_of_pads,new_drum_radius,new_drum_separation] = calculate_alpha(drum_radius,drum_separation,pad_thickness,minimum_pads)
pad_separation = round(decimal.Decimal(0.2*alpha),4)
pad_length = round(decimal.Decimal(0.8*alpha),4)
# CIRCLE THROUGH +/-
sign = [1, -1]
ROOT = ltr.Element("sdf", version="1.4")
MODEL = ltr.SubElement(ROOT, "model", name = "track_creator")
# CREATE THE BASE_LINK
create_base(MODEL,"base_link",0,0,2*drum_radius,drum_separation,track_separation,2*drum_radius)
# CREATE THE DRUMS
for i in range(4):
x = sign[i//2] * drum_separation / 2
y = sign[i%2] * (track_separation / 2 + drum_width / 2)
z = pad_thickness + drum_radius
create_drum(MODEL,"drum"+str(i+1),x,y,z,drum_radius,drum_width)
create_rev_joint(MODEL,"drum_joint"+str(i+1),"base_link","drum"+str(i+1),"0 0 0 0 0 0","0 1 0")
# CREATE THE TRACKS
# CREATE THE RIGHT TRACK
i = 0
totalAlpha = [0]
padcoords = [0, (track_separation+drum_width)/2 , pad_thickness/2, 0, 0, 0]
while i < num_of_pads:
padcoords = transform_pad(padcoords,pad_length,pad_separation,pad_thickness,new_drum_separation,drum_radius,totalAlpha)
add_pad(MODEL,"right_pad"+str(i+1),padcoords,pad_length,pad_width,pad_thickness)
if i != (num_of_pads-1):
create_rev_joint(MODEL, "right_pad_joint"+str(i+1),"right_pad"+str(i+1),"right_pad"+str(i+2),str(-(pad_length+pad_separation)/2)+" 0 0 0 0 0","0 1 0")
else:
create_rev_joint(MODEL, "right_pad_joint"+str(i+1),"right_pad"+str(i+1),"right_pad1",str(-(pad_length+pad_separation)/2)+" 0 0 0 0 0","0 1 0")
i += 1
# CREATE THE LEFT TRACK
i = 0
totalAlpha = [0]
padcoords = [0, -(track_separation+drum_width)/2 , pad_thickness/2, 0, 0, 0]
while i < num_of_pads:
padcoords = transform_pad(padcoords,pad_length,pad_separation,pad_thickness,new_drum_separation,drum_radius,totalAlpha)
add_pad(MODEL,"left_pad"+str(i+1),padcoords,pad_length,pad_width,pad_thickness)
if i != (num_of_pads-1):
create_rev_joint(MODEL, "left_pad_joint"+str(i+1),"left_pad"+str(i+1),"left_pad"+str(i+2),str(-(pad_length+pad_separation)/2)+" 0 0 0 0 0","0 1 0")
else:
create_rev_joint(MODEL, "left_pad_joint"+str(i+1),"left_pad"+str(i+1),"left_pad1",str(-(pad_length+pad_separation)/2)+" 0 0 0 0 0","0 1 0")
i += 1
# WRITE THE MODEL
f = open(output, 'w')
f.write(ltr.tostring(ROOT, pretty_print=True))
f.close()
|
#!/usr/bin/env python
# pylint: disable=R0903
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents a Telegram ChatAction."""
class ChatAction(object):
"""This object represents a Telegram ChatAction."""
TYPING = 'typing'
UPLOAD_PHOTO = 'upload_photo'
RECORD_VIDEO = 'record_video'
UPLOAD_VIDEO = 'upload_video'
RECORD_AUDIO = 'record_audio'
UPLOAD_AUDIO = 'upload_audio'
UPLOAD_DOCUMENT = 'upload_document'
FIND_LOCATION = 'find_location'
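# Illustrative use with python-telegram-bot (bot and chat_id are assumed to
# already exist):
#     bot.sendChatAction(chat_id=chat_id, action=ChatAction.TYPING)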
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from builtins import range
from builtins import object
from .IC_GrabberDLL import IC_GrabberDLL
from .IC_Camera import IC_Camera
from .IC_Exception import IC_Exception
class IC_ImagingControl(object):
def init_library(self):
"""
Initialise the IC Imaging Control library.
"""
# remember list of unique device names
self._unique_device_names = None
# remember device objects by unique name
self._devices = {}
# no license key needed anymore
err = IC_GrabberDLL.init_library(None)
if err != 1:
raise IC_Exception(err)
def get_unique_device_names(self):
"""
Gets unique names (i.e. model + label + serial) of devices.
:returns: list -- unique devices names.
"""
if self._unique_device_names is None:
# make new list
self._unique_device_names = []
# get num devices, must be called before get_unique_name_from_list()!
num_devices = IC_GrabberDLL.get_device_count()
if num_devices < 0:
raise IC_Exception(num_devices)
# populate list
for i in range(num_devices):
self._unique_device_names.append(IC_GrabberDLL.get_unique_name_from_list(i))
return self._unique_device_names
def get_device(self, unique_device_name):
"""
Gets camera device object based on unique name string.
Will create one only if it doesn't already exist.
        :param unique_device_name: string -- the unique name of the device.
:returns: IC_Camera object -- the camera device object requested.
"""
# check name is valid
if unique_device_name in self.get_unique_device_names():
# check if already have a ref to device
if unique_device_name not in self._devices:
# if not, create one
self._devices[unique_device_name] = IC_Camera(unique_device_name)
return self._devices[unique_device_name]
raise IC_Exception(-106)
def close_library(self):
"""
Close the IC Imaging Control library, and close and release all references to camera devices.
"""
# release handle grabber objects of cameras as they won't be needed again.
# try to close & delete each known device, but only if we own the reference to it!
for unique_device_name in self.get_unique_device_names():
if unique_device_name in self._devices:
# close camera device if open
if self._devices[unique_device_name].is_open():
self._devices[unique_device_name].close()
# release grabber of camera device
IC_GrabberDLL.release_grabber(self._devices[unique_device_name]._handle)
# kill refs
self._unique_device_names = None
self._devices = None
# close lib
IC_GrabberDLL.close_library()
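# A minimal usage sketch (assumes at least one camera is connected):
#     ic = IC_ImagingControl()
#     ic.init_library()
#     names = ic.get_unique_device_names()
#     cam = ic.get_device(names[0])
#     ...
#     ic.close_library()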
|
import pandas as pd
import os
import glob
from .config import ISOCHRONES
from .grid import Grid
class BolometricCorrectionGrid(Grid):
"""Bolometric corrections in different bands, as a function of stuff
Stores bolometric corrections computed on a grid of stellar atmospheric
parameters (Teff, logg, [Fe/H]), Av, and Rv.
Specific implementations of this grid should subclass this
(e.g., `MISTBolometricCorrectionGrid`).
Parameters
----------
bands : list(str)
List of band names, each parsed with `get_band` method.
Tables are downloaded when requested.
"""
index_cols = ("Teff", "logg", "[Fe/H]", "Av", "Rv")
name = None
is_full = True
def __init__(self, bands=None):
self.bands = bands if bands is not None else list(self.default_bands)
self._band_map = None
self._phot_systems = None
self._df = None
self._interp = None
def get_band(self, *args, **kwargs):
        raise NotImplementedError
def _make_band_map(self):
phot_systems = set()
band_map = {}
for b in self.bands:
phot, band = self.get_band(b)
phot_systems.add(phot)
band_map[b] = band
self._band_map = band_map
self._phot_systems = phot_systems
@property
def band_map(self):
if self._band_map is None:
self._make_band_map()
return self._band_map
@property
def phot_systems(self):
if self._phot_systems is None:
self._make_band_map()
return self._phot_systems
@property
def datadir(self):
return os.path.join(ISOCHRONES, "BC", self.name)
def get_filename(self, phot, feh):
rootdir = self.datadir
sign_str = "m" if feh < 0 else "p"
filename = "feh{0}{1:03.0f}.{2}".format(sign_str, abs(feh) * 100, phot)
return os.path.join(rootdir, filename)
def parse_table(self, filename):
"""Reads text table into dataframe
"""
with open(filename) as fin:
for i, line in enumerate(fin):
if i == 5:
names = line[1:].split()
break
return pd.read_csv(
filename, names=names, delim_whitespace=True, comment="#", index_col=self.index_cols
)
def get_table(self, phot, feh):
return self.parse_table(self.get_filename(phot, feh))
def get_hdf_filename(self, phot):
return os.path.join(self.datadir, "{}.h5".format(phot))
def get_tarball_url(self, phot):
url = "http://waps.cfa.harvard.edu/MIST/BC_tables/{}.txz".format(phot)
return url
def get_tarball_file(self, phot):
return os.path.join(self.datadir, "{}.txz".format(phot))
def get_df(self):
df_all = pd.DataFrame()
for phot in self.phot_systems:
hdf_filename = self.get_hdf_filename(phot=phot)
if not os.path.exists(hdf_filename):
filenames = glob.glob(os.path.join(self.datadir, "*.{}".format(phot)))
if not filenames:
self.extract_tarball(phot=phot)
filenames = glob.glob(os.path.join(self.datadir, "*.{}".format(phot)))
df = pd.concat([self.parse_table(f) for f in filenames]).sort_index()
df.to_hdf(hdf_filename, "df")
df = pd.read_hdf(hdf_filename)
df_all = pd.concat([df_all, df], axis=1)
df_all = df_all.rename(columns={v: k for k, v in self.band_map.items()})
for col in df_all.columns:
if col not in self.bands:
del df_all[col]
return df_all
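# Illustrative subclass sketch (all names below are hypothetical; a real
# implementation must map band names via `get_band`):
#     class MyBCGrid(BolometricCorrectionGrid):
#         name = "MyGrid"
#         default_bands = ("V", "K")
#         def get_band(self, b):
#             # return (photometric system, column name) for a band
#             return "MyPhotSystem", b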
|
# Screenx functions and variables
from joequery.settings import UWSGI_ENV
import time
import requests
import json
if UWSGI_ENV:
import uwsgi
else:
SCREENX_CACHE = {}
SCREENX_API_CHECK_INTERVAL = 90
# Use a simple in-process dict cache when working locally with werkzeug; use
# the uWSGI cache when this app is running under uWSGI.
def screenx_cache_expired(t):
'''
t: integer timestamp
'''
lastChecked = screenx_cache_get('lastChecked')
interval = SCREENX_API_CHECK_INTERVAL
return t > (lastChecked + interval)
def screenx_cache_set(k,v):
if UWSGI_ENV:
v = str(int(v))
uwsgi.cache_update(k, v)
else:
SCREENX_CACHE[k] = v
def screenx_cache_get(k):
if UWSGI_ENV:
v = int(uwsgi.cache_get(k))
else:
v = SCREENX_CACHE.get(k)
return v
def screenx_check_status():
t = int(time.time())
# Make sure the cache has been initialized
if screenx_cache_get('streaming') is None:
screenx_cache_set('streaming', False)
screenx_cache_set('lastChecked', -1)
if screenx_cache_expired(t):
screenx_cache_set('lastChecked', t)
try:
r = requests.get("http://screenx.tv/screens/status/JoeQuery", timeout=1)
except requests.exceptions.Timeout:
return
if r.status_code == 200:
if r.content == 'null':
screenx_cache_set('streaming', False)
else:
# Example json: {u'casting': True, u'title': u'infinite `date`'}
js = json.loads(r.content)
screenx_cache_set('streaming', js['casting'])
# If not 200, assume API error and try again the next interval
else:
screenx_cache_set('streaming', False)
screenx_cache_set('lastChecked', t)
|
# coding: utf-8
from app import db, app
import hashlib
import re
ROLE_USER = 0
ROLE_ADMIN = 1
followers = db.Table('followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')), db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
nickname = db.Column(db.String(64), index = True, unique = True)
email = db.Column(db.String(120), index = True, unique = True)
role = db.Column(db.SmallInteger, default = ROLE_USER)
posts = db.relationship('Post', backref='author', lazy='dynamic')
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime)
followed = db.relationship('User',
secondary = followers,
primaryjoin = (followers.c.follower_id == id),
secondaryjoin = (followers.c.followed_id == id),
backref = db.backref('followers', lazy = 'dynamic'),
lazy = 'dynamic')
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
    def avatar(self, size):  # avatar image URL
        # return 'https://www.gravatar.com/avatar/205e460b479e2e5b48aec07710c08d50' + '?d=mm&s=' + str(size)
        return 'https://www.gravatar.com/avatar/' + hashlib.md5(self.email.lower()).hexdigest() + '?d=mm&s=' + str(size)  # d=mm returns a "mystery man" placeholder (a grey silhouette) when the user has no Gravatar account; s=N scales the avatar to the given pixel size.
@staticmethod
def make_valid_nickname(nickname):
return re.sub('[^a-zA-Z0-9_\.]', '', nickname)
@staticmethod
def make_unique_nickname(nickname):
if User.query.filter_by(nickname = nickname).first() == None:
return nickname
version = 2
while True:
new_nickname = nickname + str(version)
if User.query.filter_by(nickname = new_nickname).first() == None:
break
version += 1
return new_nickname
def follow(self, user):
if not self.is_following(user):
self.followed.append(user)
return self
def unfollow(self, user):
if self.is_following(user):
self.followed.remove(user)
return self
def is_following(self, user):
return self.followed.filter(followers.c.followed_id == user.id).count() > 0
def followed_posts(self):
return Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter(followers.c.follower_id == self.id).order_by(Post.timestamp.desc())
def __repr__(self):
return '<User %r>' % (self.nickname)
import sys
if sys.version_info >= (3, 0):
enable_search = False
else:
enable_search = True
import flask_whooshalchemyplus as whooshalchemy
class Post(db.Model):
    __searchable__ = ['body']  # index the post's body field for full-text search
id = db.Column(db.Integer, primary_key = True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
language = db.Column(db.String(5))
def __repr__(self):
return '<Post %r>' % (self.body)
if enable_search:
    whooshalchemy.whoosh_index(app, Post)  # whoosh_index initializes the full-text search index for this model
|
# Copyright 2011-12 Michael Thomas
#
# See www.whatang.org for more information.
#
# This file is part of DrumBurp.
#
# DrumBurp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DrumBurp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DrumBurp. If not, see <http://www.gnu.org/licenses/>
'''
Created on 5 Jan 2011
@author: Mike Thomas
'''
from PyQt4 import QtGui, QtCore
from Data.NotePosition import NotePosition
from Data import DBConstants
from GUI.DBFSMEvents import (LeftPress, MidPress, RightPress,
EditMeasureProperties,
MouseMove, MouseRelease,
ChangeRepeatCount,
SetAlternateEvent,
MeasureCountContext,
SetSticking,
SetBpmEvent)
def _painterSaver(method):
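    # Decorator for QMeasure paint helpers: saves the painter state before the
    # wrapped call and always restores it afterwards, even if the call raises.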
def wrapper(self, painter, *args, **kwargs):
painter.save()
try:
method(self, painter, *args, **kwargs)
finally:
painter.restore()
return wrapper
class QMeasure(QtGui.QGraphicsItem):
def __init__(self, index, qScore, measure, parent):
super(QMeasure, self).__init__(parent)
self._props = qScore.displayProperties
self._qScore = qScore
self._index = index
self._width = 0
self._height = 0
self._base = 0
self._bpmBase = 0
self._repeatBottom = 0
self._notesTop = 0
self._notesBottom = 0
self._stickingBelowTop = 0
self._measureIndex = qScore.score.measurePositionToIndex(
self.measurePosition())
self._highlight = None
self._rect = QtCore.QRectF(0, 0, 0, 0)
self._repeatCountRect = None
self._bpmRect = None
self._alternate = None
self._stickingAbove = None
self._stickingBelow = None
self._showStickingHighlight = False
self._playing = False
self._nextToPlay = False
self._dragHighlight = False
self._potentials = []
self._potentialDrum = None
self._potentialHead = None
self._potentialSet = None
self.setAcceptsHoverEvents(True)
self._measure = measure
self._displayCols = 0
self._setDimensions()
self._isFirst = False
self.update()
def numLines(self):
return self.parentItem().numLines()
def lineIndex(self, index):
return self.parentItem().lineIndex(index)
def _setDimensions(self):
self.prepareGeometryChange()
if self.isSimile():
referredMeasure = self._qScore.score.getReferredMeasure(
self._measureIndex)
self._displayCols = referredMeasure.counter.numBeats()
else:
self._displayCols = len(self._measure)
self._width = self._qScore.xSpacing * self._displayCols
self._height = 0
self._base = 0
self._bpmBase = 0
if self.parentItem().anyMeasureHasBpm():
self._base += self._props.bpmHeight()
if self._props.measureCountsVisible:
self._base += self._props.measureCountHeight()
self._repeatBottom = self._base + self.parentItem().alternateHeight()
self._notesTop = self._repeatBottom
if self.parentItem().showStickingAbove():
self._notesTop += self._qScore.ySpacing
self._height = self._notesTop
self._height += self.numLines() * self._qScore.ySpacing
self._notesBottom = self._height
if self._props.beatCountVisible:
self._height += self._qScore.ySpacing
self._stickingBelowTop = self._height
if self.parentItem().showStickingBelow():
self._height += self._qScore.ySpacing
self._rect.setBottomRight(QtCore.QPointF(self._width, self._height))
def boundingRect(self):
return self._rect
def width(self):
return self._width
def height(self):
return self._height
def setFirst(self, trueFalse):
self._isFirst = trueFalse
def _colourScheme(self):
return self._qScore.parent().colourScheme
def isSimile(self):
return self._measure.simileDistance > 0
@_painterSaver
def _paintNotes(self, painter, xValues):
scheme = self._colourScheme()
scheme.text.setPainter(painter)
font = painter.font()
fontMetric = QtGui.QFontMetrics(font)
numLines = self.numLines()
baseline = self._notesBottom - self._qScore.ySpacing
lineHeight = baseline + (self._qScore.ySpacing / 2.0) - 1
dot = self._qScore.scale
potential = False
if self.isSimile():
simText = "%%%d" % self._measure.simileDistance
left = " "
right = " "
if self._measure.simileIndex > 0:
left = "-"
if self._measure.simileIndex < self._measure.simileDistance - 1:
right = "-"
while len(simText) < len(xValues):
simText = left + simText + right
for drumIndex in xrange(numLines):
lineIndex = self.lineIndex(drumIndex)
for noteTime, x in enumerate(xValues):
if self.isSimile():
if drumIndex == numLines / 2:
text = simText[noteTime]
else:
text = " "
elif (lineIndex == self._potentialDrum
and noteTime in self._potentialSet):
text = self._potentialHead
potential = True
scheme.potential.setPainter(painter)
elif (noteTime, drumIndex) == self._highlight:
potential = True
current = self._measure.noteAt(noteTime, lineIndex)
potentialHead = self._qScore.getCurrentHead()
if potentialHead is None:
potentialHead = self._qScore.score.drumKit.getDefaultHead(
lineIndex)
if current == potentialHead:
text = current
scheme.delete.setPainter(painter)
else:
text = potentialHead
scheme.potential.setPainter(painter)
else:
text = self._measure.noteAt(noteTime, lineIndex)
if text == DBConstants.EMPTY_NOTE:
painter.drawLine(x + dot, lineHeight,
x + self._qScore.xSpacing - dot,
lineHeight)
else:
br = fontMetric.tightBoundingRect(text)
left = x + (self._qScore.xSpacing - br.width()) / 2
offset = br.y() - (self._qScore.ySpacing - br.height()) / 2
painter.drawText(QtCore.QPointF(left, baseline - offset),
text)
if potential:
scheme.text.setPainter(painter)
potential = False
baseline -= self._qScore.ySpacing
lineHeight -= self._qScore.ySpacing
# painter.drawRect(self._rect) # Draw bounding box
@_painterSaver
def _paintHighlight(self, painter, xValues):
if self._highlight is None:
return
noteTime, drumIndex = self._highlight # IGNORE:unpacking-non-sequence
baseline = self._notesTop
countLine = self._notesBottom
x = xValues[noteTime]
scheme = self._colourScheme()
# Highlight count
scheme.noteHighlight.setPainter(painter)
painter.drawRect(x, countLine,
self._qScore.xSpacing - 1,
self._qScore.ySpacing - 1)
# Highlight notes column
scheme.timeHighlight.setPainter(painter)
painter.drawRect(x, baseline,
self._qScore.xSpacing - 1,
self._notesBottom - self._notesTop - 1)
@_painterSaver
def _paintBeatCount(self, painter, xValues):
font = painter.font()
fontMetric = QtGui.QFontMetrics(font)
baseline = self._notesBottom
if self.isSimile():
counter = ["%d" % (beat + 1) for beat in
xrange(self._displayCols)]
else:
counter = self._measure.count()
for noteTime, count in enumerate(counter):
x = xValues[noteTime]
br = fontMetric.tightBoundingRect(count)
left = x + (self._qScore.xSpacing - br.width()) / 2
offset = br.y() - (self._qScore.ySpacing - br.height()) / 2
painter.drawText(QtCore.QPointF(left, baseline - offset), count)
@_painterSaver
def _paintRepeatCount(self, painter):
spacing = self._qScore.scale
self._colourScheme().text.setPainter(painter)
repeatText = '%dx' % self._measure.repeatCount
textWidth = QtGui.QFontMetrics(painter.font()).width(repeatText)
textLocation = QtCore.QPointF(self.width() - textWidth - 2 * spacing,
self._repeatBottom - spacing)
painter.drawText(textLocation, repeatText)
if self._repeatCountRect is None:
self._repeatCountRect = QtCore.QRectF(0, 0, 0, 0)
self._repeatCountRect.setSize(QtCore.QSizeF(textWidth,
self._repeatBottom - self._base))
self._repeatCountRect.moveTopRight(QtCore.QPointF(self.width() - 2 * spacing,
self._base - spacing))
@_painterSaver
def _paintAlternate(self, painter):
altHeight = self.parentItem().alternateHeight()
spacing = self._qScore.scale
self._colourScheme().text.setPainter(painter)
painter.drawLine(0, self._base, self.width() - spacing * 2, self._base)
painter.drawLine(0, self._base, 0, self._notesTop - spacing * 2)
font = painter.font()
font.setItalic(True)
painter.setFont(font)
if self._alternate is None:
self._alternate = QtCore.QRectF(0, 0, 0, 0)
text = self._measure.alternateText
textWidth = QtGui.QFontMetrics(font).width(text)
self._alternate.setSize(QtCore.QSizeF(textWidth, altHeight))
bottomLeft = QtCore.QPointF(2 * spacing, self._repeatBottom - spacing)
self._alternate.moveBottomLeft(bottomLeft)
painter.drawText(2 * spacing, self._repeatBottom - spacing, text)
@_painterSaver
def _paintPlayingHighlight(self, painter):
scheme = self._colourScheme()
if self._playing:
scheme.playingHighlight.setPainter(painter)
elif self._nextToPlay:
scheme.nextPlayingHighlight.setPainter(painter)
else:
return
painter.drawRect(-1, -1, self.width() + 1, self.height() + 1)
painter.drawRect(0, 0, self.width() - 1, self.height() - 1)
@_painterSaver
def _paintMeasureCount(self, painter):
self._colourScheme().text.setPainter(painter)
font = painter.font()
font.setItalic(True)
painter.setFont(font)
painter.drawText(1, self._base - 2, "%d" % (1 + self._measureIndex))
@_painterSaver
def _paintDragHighlight(self, painter):
scheme = self._colourScheme()
scheme.selectedMeasure.setPainter(painter)
painter.drawRect(self._rect)
def _paintSticking(self, painter, sticking, baseline, xValues):
font = painter.font()
fontMetric = QtGui.QFontMetrics(font)
for text, x in zip(sticking, xValues):
if text == " ":
pass
else:
br = fontMetric.tightBoundingRect(text)
left = x + (self._qScore.xSpacing - br.width()) / 2
offset = br.y() - (self._qScore.ySpacing - br.height()) / 2
painter.drawText(QtCore.QPointF(left, baseline - offset),
text)
@_painterSaver
def _paintStickingHighlight(self, painter, stickingRect):
scheme = self._colourScheme()
scheme.sticking.setPainter(painter)
painter.drawRect(stickingRect)
@_painterSaver
def _paintStickingAbove(self, painter, xValues):
if not self.parentItem().showStickingAbove():
self._stickingAbove = None
return
self._stickingAbove = QtCore.QRectF(0, 0, 0, 0)
self._stickingAbove.setSize(QtCore.QSizeF(self.width(),
self._qScore.ySpacing))
spacing = self._qScore.scale
baseline = self._notesTop - spacing
bottomLeft = QtCore.QPointF(0, baseline)
self._stickingAbove.moveBottomLeft(bottomLeft)
sticking = self._measure.aboveText
if self._showStickingHighlight:
self._paintStickingHighlight(painter, self._stickingAbove)
self._paintSticking(painter, sticking,
baseline - self._qScore.ySpacing, xValues)
@_painterSaver
def _paintStickingBelow(self, painter, xValues):
if not self.parentItem().showStickingBelow():
self._stickingBelow = None
return
self._stickingBelow = QtCore.QRectF(0, 0, 0, 0)
self._stickingBelow.setSize(QtCore.QSizeF(self.width(),
self._qScore.ySpacing))
bottomLeft = QtCore.QPointF(0, self._height)
self._stickingBelow.moveBottomLeft(bottomLeft)
sticking = self._measure.belowText
if self._showStickingHighlight:
self._paintStickingHighlight(painter, self._stickingBelow)
self._paintSticking(painter, sticking,
self._height - self._qScore.ySpacing, xValues)
@_painterSaver
def _paintNewBpm(self, painter):
text = "BPM = %d" % self._measure.newBpm
painter.drawText(1, self._bpmBase + self._props.bpmHeight() - 1, text)
textWidth = QtGui.QFontMetrics(painter.font()).width(text)
if self._bpmRect is None:
self._bpmRect = QtCore.QRectF(0, 0, 0, 0)
self._bpmRect.setSize(QtCore.QSizeF(
textWidth, self._props.bpmHeight()))
self._bpmRect.moveTopLeft(QtCore.QPointF(1, self._bpmBase))
@_painterSaver
def paint(self, painter, dummyOption, dummyWidget=None):
if self._dragHighlight:
self._paintDragHighlight(painter)
self._colourScheme().text.setPainter(painter)
font = self._props.noteFont
if font is None:
font = painter.font()
painter.setFont(font)
xValues = [noteTime * self._qScore.xSpacing
for noteTime in xrange(self._displayCols)]
if not self.isSimile() and self._highlight:
self._paintHighlight(painter, xValues)
self._paintNotes(painter, xValues)
if self._measure.newBpm != 0:
self._paintNewBpm(painter)
else:
self._bpmRect = None
if self._props.beatCountVisible:
self._paintBeatCount(painter, xValues)
if self._props.measureCountsVisible and self._isFirst:
self._paintMeasureCount(painter)
if self._measure.isRepeatEnd() and self._measure.repeatCount > 2:
self._paintRepeatCount(painter)
else:
self._repeatCountRect = None
if self._playing or self._nextToPlay:
self._paintPlayingHighlight(painter)
if self._measure.alternateText is not None:
self._paintAlternate(painter)
else:
self._alternate = None
# Sticking
if not self.isSimile():
self._paintStickingAbove(painter, xValues)
self._paintStickingBelow(painter, xValues)
def dataChanged(self, notePosition_):
self._setDimensions()
self.update()
self.parentItem().placeMeasures()
def xSpacingChanged(self):
self._setDimensions()
self.update()
def ySpacingChanged(self):
self._setDimensions()
self.update()
def _isOverNotes(self, lineIndex):
return 0 <= lineIndex < self.numLines()
@staticmethod
def _isOverCount(lineIndex):
return lineIndex == -1
def _isOverRepeatCount(self, point):
return (self._repeatCountRect is not None
and self._repeatCountRect.contains(point))
def _isOverAlternate(self, point):
return (self._alternate is not None
and self._alternate.contains(point))
def _isOverStickingAbove(self, point):
return (self._stickingAbove is not None
and self._stickingAbove.contains(point))
def _isOverStickingBelow(self, point):
return (self._stickingBelow is not None
and self._stickingBelow.contains(point))
def _isOverBpmChange(self, point):
return (self._bpmRect is not None
and self._bpmRect.contains(point))
def _getMouseLine(self, point):
offset = point.y() - self._notesTop
if offset < 0:
return self.numLines()
else:
return self.numLines() - int(offset / self._qScore.ySpacing) - 1
def _getMouseCoords(self, point):
return self._getNoteTime(point), self._getMouseLine(point)
def _getNotePosition(self, point):
x, y = self._getMouseCoords(point)
y = self.lineIndex(y)
return x, y
def _getNoteTime(self, point):
return int(point.x() / self._qScore.xSpacing)
def _hovering(self, event):
point = self.mapFromScene(event.scenePos())
noteTime, lineIndex = self._getMouseCoords(point)
# Set line & time highlights
if self._isOverNotes(lineIndex):
if (noteTime, lineIndex) != self._highlight:
self._highlight = noteTime, lineIndex
self.update()
self.parentItem().setLineHighlight(lineIndex)
realIndex = self.parentItem().lineIndex(lineIndex)
self._qScore.setCurrentHeads(realIndex)
elif self._isOverStickingAbove(point) or self._isOverStickingBelow(point):
self._highlight = noteTime, None
self.update()
elif self._highlight != None:
self._highlight = None
self.parentItem().clearHighlight()
self.update()
# Set status message and cursor
if self._isOverStickingAbove(point) or self._isOverStickingBelow(point):
self._qScore.setStatusMessage("Click to rotate sticking.")
self.setCursor(QtCore.Qt.PointingHandCursor)
elif self._isOverNotes(lineIndex):
if self.isSimile():
self._qScore.setStatusMessage("Right click for options.")
else:
self._qScore.setStatusMessage("Click to toggle notes; "
"middle click to toggle special noteheads; "
"right click for options.")
self.setCursor(QtCore.Qt.ArrowCursor)
elif self._isOverCount(lineIndex):
if self.isSimile():
self._qScore.setStatusMessage("Right click for count options.")
else:
self._qScore.setStatusMessage("Double click to edit measure count; "
"right click for count options.")
self.setCursor(QtCore.Qt.PointingHandCursor)
elif self._isOverRepeatCount(point):
self._qScore.setStatusMessage("Double click to edit repeat count.")
self.setCursor(QtCore.Qt.PointingHandCursor)
elif self._isOverBpmChange(point):
self._qScore.setStatusMessage("Double click to edit BPM change.")
self.setCursor(QtCore.Qt.PointingHandCursor)
elif self._isOverAlternate(point):
self._qScore.setStatusMessage("Double click to edit "
"alternate ending.")
self.setCursor(QtCore.Qt.PointingHandCursor)
else:
self._qScore.setStatusMessage()
self.setCursor(QtCore.Qt.ArrowCursor)
def hoverEnterEvent(self, event):
self._hovering(event)
self._showStickingHighlight = True
event.accept()
def hoverMoveEvent(self, event):
self._hovering(event)
event.accept()
def hoverLeaveEvent(self, event):
self._highlight = None
self._showStickingHighlight = False
self.update()
self.parentItem().clearHighlight()
self._qScore.setCurrentHeads(None)
self.setCursor(QtCore.Qt.ArrowCursor)
self._qScore.setStatusMessage()
event.accept()
def mousePressEvent(self, event):
point = self.mapFromScene(event.scenePos())
eventType = LeftPress
np = None
lineIndex = self._getMouseLine(point)
if self._isOverNotes(lineIndex):
noteTime, drumIndex = self._getNotePosition(point)
np = self.makeNotePosition(noteTime, drumIndex)
if event.button() == QtCore.Qt.MidButton:
eventType = MidPress
elif event.button() == QtCore.Qt.RightButton:
eventType = RightPress
elif self._isOverCount(lineIndex):
if event.button() == QtCore.Qt.RightButton:
eventType = MeasureCountContext
noteTime = self._getNoteTime(point)
np = self.makeNotePosition(noteTime, -1)
self._qScore.sendFsmEvent(eventType(self, np, event.screenPos()))
def mouseMoveEvent(self, event):
item = self._qScore.itemAt(event.scenePos())
if item is self:
point = self.mapFromScene(event.scenePos())
if self._isOverNotes(self._getMouseLine(point)):
np = self._getNotePosition(point)
np = self.makeNotePosition(*np)
self._qScore.sendFsmEvent(MouseMove(self, np))
elif isinstance(item, QMeasure):
item.mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
point = self.mapFromScene(event.scenePos())
np = None
if self._isOverNotes(self._getMouseLine(point)):
noteTime, drumIndex = self._getNotePosition(point)
np = self.makeNotePosition(noteTime, drumIndex)
elif self._isOverStickingAbove(point) and not self.isSimile():
self.setSticking(point, True)
elif self._isOverStickingBelow(point) and not self.isSimile():
self.setSticking(point, False)
self._qScore.sendFsmEvent(MouseRelease(self, np))
def mouseDoubleClickEvent(self, event):
point = self.mapFromScene(event.scenePos())
lineIndex = self._getMouseLine(point)
if self._isOverCount(lineIndex) and not self.isSimile():
counter = self._measure.counter
fsmEvent = EditMeasureProperties(counter,
self._props.counterRegistry,
self.measurePosition())
self._qScore.sendFsmEvent(fsmEvent)
elif self._isOverRepeatCount(point):
fsmEvent = ChangeRepeatCount(self._measure.repeatCount,
self.measurePosition())
self._qScore.sendFsmEvent(fsmEvent)
elif self._isOverBpmChange(point):
self.setNewBpm()
elif self._isOverAlternate(point):
self.setAlternate()
elif self._isOverStickingAbove(point) or self._isOverStickingBelow(point):
# Want this to get picked up by the normal mouseReleaseEvent:
# don't ignore it, or it gets passed to the parent for handling
# as a double click event
event.accept()
else:
event.ignore()
def makeNotePosition(self, noteTime, drumIndex):
np = NotePosition(measureIndex=self._index,
noteTime=noteTime,
drumIndex=drumIndex)
return self.parentItem().augmentNotePosition(np)
def measurePosition(self):
np = NotePosition(measureIndex=self._index)
return self.parentItem().augmentNotePosition(np)
def setAlternate(self):
self._qScore.sendFsmEvent(SetAlternateEvent(self._measure.alternateText,
self.measurePosition()))
def setNewBpm(self):
bpm = self._measure.newBpm
if bpm == 0:
bpm = self._qScore.score.bpmAtMeasureByPosition(
self.measurePosition())
if bpm == 0:
bpm = 120
self._qScore.sendFsmEvent(SetBpmEvent(self.measurePosition(), bpm))
def setPlaying(self, onOff):
self._playing = onOff
self.update()
def setNextToPlay(self, onOff):
self._nextToPlay = onOff
self.update()
def setDragHighlight(self, onOff):
self._dragHighlight = onOff
self.update()
def noteAt(self, np):
return self._measure.noteAt(np.noteTime, np.drumIndex)
def alternateText(self):
return self._measure.alternateText
def setPotentials(self, notes=None, head=None):
if notes is None:
newNotes = []
self._potentialDrum = None
else:
newNotes = [np.noteTime for np in notes]
self._potentialDrum = notes[0].drumIndex
if newNotes != self._potentials:
self._potentials = newNotes
self._potentialSet = set(self._potentials)
self._potentialHead = head
self.update()
def setSticking(self, point, above):
noteTime = self._getNoteTime(point)
if not self._measure.hasAnyNoteAt(noteTime):
return
notePos = self.makeNotePosition(noteTime, 0)
self._qScore.sendFsmEvent(SetSticking(notePos, above))
|
#/******************************************************************************
# * Copyright (c) 2012 Jan Rheinländer <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Library General Public *
# * License as published by the Free Software Foundation; either *
# * version 2 of the License, or (at your option) any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this library; see the file COPYING.LIB. If not, *
# * write to the Free Software Foundation, Inc., 59 Temple Place, *
# * Suite 330, Boston, MA 02111-1307, USA *
# * *
# ******************************************************************************/
import FreeCAD, FreeCADGui
import Part, Sketcher, PartDesignGui
import math
def makeVector(point):
if point.__class__ == FreeCAD.Vector:
return point
return FreeCAD.Vector(point.X, point.Y, point.Z)
class Hole():
"Hole feature"
App = FreeCAD
Gui = FreeCADGui
def __init__(self, feature):
self.feature = feature
self.feature.addProperty("App::PropertyString","HoleType","Hole","Type of hole").HoleType="Depth"
self.feature.addProperty("App::PropertyBool","Threaded","Hole","Threaded hole").Threaded=False
self.feature.addProperty("App::PropertyBool","Counterbore","Hole","Counterbore hole").Counterbore=False
self.feature.addProperty("App::PropertyBool","Countersink","Hole","Countersink hole").Countersink=False
self.feature.addProperty("App::PropertyString","Norm","Hole","Name of norm").Norm="Custom"
self.feature.addProperty("App::PropertyString","NormTolerance","Hole","Tolerance field of norm").NormTolerance="medium"
self.feature.addProperty("App::PropertyLength","NormDiameter","Hole","Nominal diameter of hole").NormDiameter=4.0
self.feature.addProperty("App::PropertyString", "ExtraNorm", "Hole", "Norm of bolt or washer used in hole").ExtraNorm="ISO 4762"
self.feature.addProperty("App::PropertyString", "NormThread", "Hole", "Norm of thread").NormThread="DIN 13-1"
self.feature.addProperty("App::PropertyString", "NormThreadFinish", "Hole", "Norm defining thread finish length").NormThreadFinish="DIN 76-2"
self.feature.addProperty("App::PropertyLength","Diameter","Hole","Diameter of hole").Diameter=5.0
self.feature.addProperty("App::PropertyLength","Depth","Hole","Depth of hole").Depth=8.0
self.feature.addProperty("App::PropertyLength","CounterboreDiameter","Hole","Diameter of counterbore").CounterboreDiameter=10.0
self.feature.addProperty("App::PropertyLength","CounterboreDepth","Hole","Depth of counterbore").CounterboreDepth=4.0
self.feature.addProperty("App::PropertyLength","CountersinkAngle","Hole","Angle of countersink").CountersinkAngle=45.0;
self.feature.addProperty("App::PropertyLength","ThreadLength","Hole","Length of thread").ThreadLength=5.0;
self.feature.addProperty("App::PropertyString","PositionType","Hole","Type of position references").PositionType="Linear"
self.feature.addProperty("App::PropertyLinkSub","Support","Hole","Support of hole feature").Support=None
self.feature.addProperty("App::PropertyLink","HoleGroove","Hole","Revolution feature creating the hole").HoleGroove=None
# Create new HoleGroove feature
body = FreeCADGui.activeView().getActiveObject("pdbody");
self.sketchaxis = self.feature.Document.addObject("PartDesign::Line", "HoleSketchAxis")
body.addFeature(self.sketchaxis)
self.Gui.ActiveDocument.hide(self.sketchaxis.Name)
self.sketchplane = self.feature.Document.addObject("PartDesign::Plane", "HoleSketchPlane")
self.sketchplane.References = (self.sketchaxis, "")
body.addFeature(self.sketchplane)
self.Gui.ActiveDocument.hide(self.sketchplane.Name)
self.sketch = self.feature.Document.addObject("Sketcher::SketchObject","HoleSketch")
self.sketch.Support = (self.sketchplane, ["front"])
body.addFeature(self.sketch)
self.Gui.ActiveDocument.hide(self.sketch.Name)
feature.HoleGroove = feature.Document.addObject("PartDesign::Groove","HoleGroove")
feature.HoleGroove.Angle = 360.0
feature.HoleGroove.Sketch = self.sketch
body.addFeature(feature.HoleGroove)
self.Gui.ActiveDocument.hide(feature.HoleGroove.Name)
self.feature.Proxy = self
self.oldCounterbore = False
self.oldCountersink = False
def execute(self, feature):
        if feature.Support is not None:
(support, element) = feature.Support
feature.Placement = feature.HoleGroove.Placement
shape = feature.HoleGroove.Shape.copy()
shape.Placement = FreeCAD.Placement()
feature.Shape = shape
self.Gui.ActiveDocument.hide(support.Name)
# Copy display properties from support
featview = feature.ViewObject
suppview = support.ViewObject
for p in suppview.PropertiesList:
if not p in ["DisplayMode","BoundingBox","Proxy","RootNode","Visibility"]:
if p in featview.PropertiesList:
val = getattr(suppview,p)
setattr(featview,p,val)
if suppview.DisplayMode in featview.listDisplayModes():
featview.DisplayMode = suppview.DisplayMode
if hasattr(suppview,"DiffuseColor") and hasattr(featview,"DiffuseColor"):
featview.DiffuseColor = suppview.DiffuseColor
def onChanged(self, fp, prop):
#self.App.Console.PrintMessage("Change property: " + str(prop) + "\n")
if fp is None or fp.Support is None:
return
if (prop == "HoleType" or prop == "Threaded" or prop == "Counterbore" or prop == "Countersink"
or prop == "Diameter" or prop == "Depth"
or prop == "CounterboreDiameter" or prop == "CounterboreDepth"
or prop == "CountersinkAngle"):
self.executeSketchChanged(fp)
fp.Document.recompute()
elif prop == "Support":
self.executePositionChanged(fp)
fp.Document.recompute()
def executePositionChanged(self, fp):
"Change the position of the hole"
if fp.Support is None:
return
plane = self.feature.HoleGroove.Sketch.Support[0]
# Get support (face)
(support, elementList) = fp.Support
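        # elementList[0] is a sub-element name such as "Face6"; evaluating it
        # against the support's Shape yields the actual face object.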
face = eval("support.Shape." + elementList[0])
refs = plane.References
if len(refs) == 0:
return
axis = plane.References[0][0]
firstTime = (len(axis.References) == 0)
if firstTime:
# Try to guess some references (using arcs or lines of the outer wire of the support face)
wire = face.OuterWire
firstLine = None
for e in wire.Edges:
if type(e.Curve) == Part.LineSegment:
if firstLine is None:
firstLine = e
firstDirection = e.Curve.EndPoint - e.Curve.StartPoint
else:
if firstDirection == e.Curve.EndPoint - e.Curve.StartPoint or firstDirection == e.Curve.StartPoint - e.Curve.EndPoint:
continue # Parallel edges
allEdges = support.Shape.Edges
firstLineIndex = -1
secondLineIndex = -1
for i in range(len(allEdges)):
try:
if type(allEdges[i].Curve) != Part.LineSegment:
continue
if (allEdges[i].Curve.StartPoint == firstLine.Curve.StartPoint and allEdges[i].Curve.EndPoint == firstLine.Curve.EndPoint) or (allEdges[i].Curve.EndPoint == firstLine.Curve.StartPoint and allEdges[i].Curve.StartPoint == firstLine.Curve.EndPoint):
firstLineIndex = i
elif (allEdges[i].Curve.StartPoint == e.Curve.StartPoint and allEdges[i].Curve.EndPoint == e.Curve.EndPoint) or (allEdges[i].Curve.EndPoint == e.Curve.StartPoint and allEdges[i].Curve.StartPoint == e.Curve.EndPoint):
secondLineIndex = i
if (firstLineIndex > -1) and (secondLineIndex > -1):
break
except:
# Unknown curvetype GeomAbs_OtherCurve
continue
axis.References = [(support, elementList[0]), (support, "Edge" + str(firstLineIndex+1)), (support, "Edge" + str(secondLineIndex+1))]
axis.Offset = 1.0
axis.Offset2 = 1.0
self.feature.PositionType = "Linear"
# Place the axis approximately in the center of the face
#p = face.CenterOfMass
#l1 = Part.LineSegment(firstLine.Curve)
#l2 = Part.LineSegment(e.Curve)
#axis.Offset = p.distanceToLine(l1.StartPoint, l1.EndPoint - l1.StartPoint)
#axis.Offset2 = p.distanceToLine(l1.StartPoint, l2.EndPoint - l2.StartPoint)
# TODO: Ensure that the hole is inside the face!
break
elif type(e.Curve) == Part.Circle:
allEdges = support.Shape.Edges
for i in range(len(allEdges)):
try:
if type(allEdges[i].Curve) != Part.Circle:
continue
c = allEdges[i].Curve
if c.Center == e.Curve.Center and c.Axis == e.Curve.Axis and c.Radius == e.Curve.Radius:
axis.References = [(support, "Edge" + str(i+1))]
self.feature.PositionType = "Coaxial"
break
except:
# Unknown curvetype
continue
elif type(e.Curve) == Part.ArcOfCircle:
allEdges = support.Shape.Edges
for i in range(len(allEdges)):
try:
if type(allEdges[i].Curve) != Part.ArcOfCircle:
continue
a = allEdges[i].Curve
if a.Center == e.Curve.Center and a.Axis == e.Curve.Axis and a.Radius == e.Curve.Radius and a.FirstParameter == e.Curve.FirstParameter and a.LastParameter == e.Curve.LastParameter:
axis.References = [(support, "Edge" + str(i+1))]
self.feature.PositionType = "Coaxial"
break
except:
continue
break
# Grab a point from the wire of the support face
axisbase = axis.Shape.Curve.StartPoint
axisdir = axis.Shape.Curve.EndPoint - axisbase
found = False
if not firstTime and len(refs) > 1:
# Try to keep the old point, to avoid the sketch plane jumping around
(obj, sub) = refs[1]
point = eval("support.Shape." + sub)
if point.Point.distanceToLine(axisbase, axisdir) > 1E-10: # TODO: Precision::Confusion()
found = True
if not found:
for p in face.OuterWire.Vertexes:
if p.Point.distanceToLine(axisbase, axisdir) > 1E-10: # TODO: Precision::Confusion()
point = p
found = True
break
if not found:
            point = face.OuterWire.Vertexes[0] # Fall back to the first vertex; this branch should not normally be reached
# Find the index of the point in the support shape
allVertexes = support.Shape.Vertexes
for v in range(len(allVertexes)):
if allVertexes[v].Point == point.Point:
# Use this point and the axis to define the sketch plane
if len(refs) < 2:
refs.append((support, "Vertex" + str(v+1)))
else:
refs[1] = (support, "Vertex" + str(v+1))
break
plane.References = refs
if firstTime:
fp.Document.recompute() # Update the Sketch Placement property
self.executeSketchChanged(fp) # Build the sketch of the hole
fp.Document.recompute()
else:
self.executeSketchChanged(fp) # Update the sketch of the hole
self.setHoleDirection(fp)
def setHoleDirection(self, feature):
# Make sure the hole goes into the material, not out of it
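        # This is done by probing a point just inside the hole from its top
        # vertex; if that point lies outside the support solid, the sketch's
        # angle datum (constraint 12) is toggled between 0 and 180 degrees.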
sketch = feature.HoleGroove.Sketch
axis = sketch.Support[0].References[0][0]
axisbase = axis.Shape.Curve.StartPoint
axisdir = axis.Shape.Curve.EndPoint - axisbase
p1 = None
p2 = None
for v in sketch.Shape.Vertexes:
# Find the two sketch vertices that are on the sketch axis
if v.Point.distanceToLine(axisbase, axisdir) < 1E-10: # TODO: use Precision::Confusion()
if p1 is None:
p1 = v.Point
else:
p2 = v.Point
break
if p1 is not None and p2 is not None:
(support, elementList) = feature.Support
face = eval("support.Shape." + elementList[0])
plane = face.Surface
if type(plane) != Part.Plane:
return
# Find the vertex that is on the top of the hole
if p1.distanceToPlane(plane.Position, plane.Axis) < 1E-10:
top = p1
dir = p2 - p1
else:
top = p2
dir = p1 - p2
if not support.Shape.isInside(top + dir.multiply(1E-8), 1E-10, False):
# Toggle the angle
angle = sketch.Constraints[12].Value
if angle == math.pi:
sketch.setDatum(12, 0.0)
else:
sketch.setDatum(12, math.pi)
def executeSketchChanged(self, fp):
"Change the sketch shape of the hole"
if self.feature.HoleGroove is None:
return
if fp.HoleType == "Thru":
# TODO: Make this more stable
length = 1E+4
else:
length = fp.Depth
radius = fp.Diameter / 2.0
if fp.Counterbore:
self.createOrUpdateCounterboreSketch(fp, length, radius)
elif fp.Countersink:
self.createOrUpdateCountersinkSketch(fp, length, radius)
else:
self.createOrUpdateStandardSketch(fp, length, radius)
def createOrUpdateStandardSketch(self, fp, depth, radius):
(support, elements) = fp.Support
if fp.HoleGroove.Sketch.GeometryCount == 0:
#FreeCAD.Console.PrintMessage("Standard sketch\n")
# New sketch
sketch = fp.HoleGroove.Sketch
axis = sketch.Support[0].References[0][0]
# Geo -1,1 is the origin (Point)
# Geo -1 is the X-axis
# Geo -2 is the Y-axis
# First external geometry is -3
sketch.addExternal(axis.Name,"LineSegment") # Geo -3: Datum axis
sketch.addExternal(support.Name, elements[0]) # Geo -4: Support face
# Note: Creating the sketch first with depth = 100.0 and then changing the constraint later seems to be more stable
tempDepth = 100.0
# Build the sketch
sketch.addGeometry(Part.LineSegment(self.App.Vector(10.0,50.0,0),self.App.Vector(10.0,-50.0,0))) # Geo0: Rotation axis
sketch.toggleConstruction(0)
sketch.addGeometry(Part.LineSegment(self.App.Vector(10.0,-10.0,0),self.App.Vector(10.0,-30.0,0))) # Geo1: Vertical axis of hole
sketch.addConstraint(Sketcher.Constraint('PointOnObject',1,1,0))# Datum0
sketch.addConstraint(Sketcher.Constraint('PointOnObject',1,2,0))# Datum1
sketch.addGeometry(Part.LineSegment(self.App.Vector(10.0,-10.0,0),self.App.Vector(20.0,-10.0,0))) # Geo2: Top of hole
sketch.addConstraint(Sketcher.Constraint('Coincident',1,1,2,1)) # Datum2
sketch.addConstraint(Sketcher.Constraint('Perpendicular',2, 1)) # Datum3
sketch.addGeometry(Part.LineSegment(self.App.Vector(20.0,-10.0,0),self.App.Vector(20.0,-25.0,0))) # Geo3: Vertical mantle of hole
sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,3,1)) # temporary
sketch.addConstraint(Sketcher.Constraint('Parallel',3, 1)) # Datum4
sketch.addConstraint(Sketcher.Constraint('Distance',3,2,1, 10.0)) # Datum5: Radius
sketch.addConstraint(Sketcher.Constraint('Distance',3,2,2, 15.0)) # Datum6: Depth
sketch.addGeometry(Part.LineSegment(self.App.Vector(10.0,-30.0,0),self.App.Vector(20.0,-25.0,0))) # Geo4: 118 degree tip angle
sketch.addConstraint(Sketcher.Constraint('Coincident',4,1,1,2)) # Datum7
sketch.addConstraint(Sketcher.Constraint('Coincident',4,2,3,2)) # Datum8
# TODO: The tip angle of 118 degrees is for steel only. It should be taken from Part material data
# (as soon as that is implemented)
sketch.addConstraint(Sketcher.Constraint('Angle',4,1,1,2, 118.0/2.0 * math.pi / 180.0)) # Datum9
# Locate at the intersection of the two external geometries
sketch.addConstraint(Sketcher.Constraint('PointOnObject',1,1,-3))# Datum10
sketch.addConstraint(Sketcher.Constraint('PointOnObject',1,1,-4))# Datum11
sketch.addConstraint(Sketcher.Constraint('Angle',0,1,-3, 1, 0.0))# Datum12
            # This datum is specific to this hole type, so move it to the last position
sketch.delConstraint(4)
sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,3,1)) # Datum13
fp.HoleGroove.ReferenceAxis = (sketch,['Axis0'])
if self.oldCounterbore == True:
# Remove counterbore from existing sketch
#FreeCAD.Console.PrintMessage("Counter to Standard sketch\n")
sketch = fp.HoleGroove.Sketch
sketch.delConstraint(19)
sketch.delConstraint(18)
sketch.delConstraint(17)
sketch.delConstraint(16)
sketch.delConstraint(15)
sketch.delConstraint(14)
sketch.delConstraint(13)
sketch.delGeometry(6)
sketch.delGeometry(5)
sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,3,1)) # Datum13
elif self.oldCountersink == True:
# Remove countersink from existing sketch
#FreeCAD.Console.PrintMessage("Sink to Standard sketch\n")
sketch = fp.HoleGroove.Sketch
sketch.delConstraint(16)
sketch.delConstraint(15)
sketch.delConstraint(14)
sketch.delConstraint(13)
sketch.delGeometry(5)
sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,3,1)) # Datum13
else:
# Update existing standard sketch
#FreeCAD.Console.PrintMessage("Update Standard sketch\n")
sketch = fp.HoleGroove.Sketch
sketch.setDatum(5, radius)
sketch.setDatum(6, depth)
if sketch.ExternalGeometry[1] != (support, elements[0]):
# Update the external geometry references
angle = sketch.Constraints[12].Value
sketch.delConstraint(13)
sketch.delConstraint(12)
sketch.delConstraint(11)
sketch.delExternal(1)
sketch.addExternal(support.Name, elements[0]) # Geo -4: Support face
sketch.addConstraint(Sketcher.Constraint('PointOnObject',1,1,-4))# Datum11
sketch.addConstraint(Sketcher.Constraint('Angle',0,1,-3, 1, angle))# Datum12
sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,3,1)) # Datum13
self.setHoleDirection(fp)
self.oldCounterbore = False
self.oldCountersink = False
def createOrUpdateCounterboreSketch(self, fp, depth, radius):
cradius = fp.CounterboreDiameter / 2.0
cdepth = fp.CounterboreDepth
(support, elements) = fp.Support
if self.oldCounterbore == True:
# Update properties of existing counterbore sketch
#FreeCAD.Console.PrintMessage("Update to Counterbore sketch\n")
sketch = fp.HoleGroove.Sketch
sketch.setDatum(5, radius)
sketch.setDatum(6, depth)
sketch.setDatum(13, cradius)
sketch.setDatum(15, cdepth)
if sketch.ExternalGeometry[1] != (support, elements[0]):
# Update the external geometry references
angle = sketch.Constraints[12].Value
sketch.delConstraint(19)
sketch.delConstraint(18)
sketch.delConstraint(17)
sketch.delConstraint(16)
sketch.delConstraint(15)
sketch.delConstraint(14)
sketch.delConstraint(13)
sketch.delConstraint(12)
sketch.delConstraint(11)
sketch.delExternal(1)
sketch.addExternal(support.Name, elements[0]) # Geo -4: Support face
sketch.addConstraint(Sketcher.Constraint('PointOnObject',1,1,-4))# Datum11
sketch.addConstraint(Sketcher.Constraint('Angle',0,1,-3, 1, angle))# Datum12
sketch.addConstraint(Sketcher.Constraint('Distance',2, cradius)) # Datum13
sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,5,1)) # Datum14
sketch.addConstraint(Sketcher.Constraint('Distance',3, 1, 2, cdepth)) # Datum15
sketch.addConstraint(Sketcher.Constraint('Parallel',5, 1)) # Datum16
sketch.addConstraint(Sketcher.Constraint('Coincident',5,2,6,1)) # Datum17
sketch.addConstraint(Sketcher.Constraint('Perpendicular',6, -3)) # Datum18
sketch.addConstraint(Sketcher.Constraint('Coincident',6,2,3,1)) # Datum19
else:
# Change standard to counterbore in existing sketch
#FreeCAD.Console.PrintMessage("Standard to Counterbore sketch\n")
sketch = fp.HoleGroove.Sketch
sketch.delConstraint(13)
sketch.addConstraint(Sketcher.Constraint('Distance',2, cradius)) # Datum13
p2 = sketch.Geometry[2].EndPoint
sketch.addGeometry(Part.LineSegment(p2,self.App.Vector(p2.x,p2.y-20.0,0))) # Geo5: Vertical mantle of counterbore
sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,5,1)) # Datum14
sketch.addConstraint(Sketcher.Constraint('Distance',3, 1, 2, cdepth)) # Datum15
sketch.addConstraint(Sketcher.Constraint('Parallel',5, 1)) # Datum16
p3 = sketch.Geometry[3].StartPoint
sketch.addGeometry(Part.LineSegment(self.App.Vector(p2.x,p2.y-20.0, 0),p3)) # Geo6: bottom of counterbore
sketch.addConstraint(Sketcher.Constraint('Coincident',5,2,6,1)) # Datum17
sketch.addConstraint(Sketcher.Constraint('Perpendicular',6, -3)) # Datum18
sketch.addConstraint(Sketcher.Constraint('Coincident',6,2,3,1)) # Datum19
self.setHoleDirection(fp)
self.oldCounterbore = True
self.oldCountersink = False
def createOrUpdateCountersinkSketch(self, fp, depth, radius):
sradius = fp.CounterboreDiameter / 2.0
sangle = fp.CountersinkAngle * math.pi / 180.0
(support, elements) = fp.Support
if self.oldCountersink == True:
# Update properties of existing countersink sketch
#FreeCAD.Console.PrintMessage("Update to Countersink sketch\n")
sketch = fp.HoleGroove.Sketch
sketch.setDatum(5, radius)
sketch.setDatum(6, depth)
sketch.setDatum(13, sradius)
sketch.setDatum(15, sangle)
if sketch.ExternalGeometry[1] != (support, elements[0]):
# Update the external geometry references
angle = sketch.Constraints[12].Value
sketch.delConstraint(16)
sketch.delConstraint(15)
sketch.delConstraint(14)
sketch.delConstraint(13)
sketch.delConstraint(12)
sketch.delConstraint(11)
sketch.delExternal(1)
sketch.addExternal(support.Name, elements[0]) # Geo -4: Support face
sketch.addConstraint(Sketcher.Constraint('PointOnObject',1,1,-4))# Datum11
sketch.addConstraint(Sketcher.Constraint('Angle',0,1,-3, 1, angle))# Datum12
sketch.addConstraint(Sketcher.Constraint('Distance',2, sradius)) # Datum13
sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,5,1)) # Datum14
sketch.addConstraint(Sketcher.Constraint('Angle',5,2, 1,2, sangle)) # Datum15
sketch.addConstraint(Sketcher.Constraint('Coincident',3,1,5,2)) # Datum16
else:
# Change standard to countersink in existing sketch
#FreeCAD.Console.PrintMessage("Standard to Countersink sketch\n")
sketch = fp.HoleGroove.Sketch
sketch.delConstraint(13)
sketch.addConstraint(Sketcher.Constraint('Distance',2, sradius)) # Datum13
p2 = sketch.Geometry[2].EndPoint
sketch.addGeometry(Part.LineSegment(p2,self.App.Vector(p2.x,p2.y-20.0,0))) # Geo5: Chamfer of countersink
sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,5,1)) # Datum14
sketch.addConstraint(Sketcher.Constraint('Angle',5,2, 1,2, sangle)) # Datum15
sketch.addConstraint(Sketcher.Constraint('Coincident',3,1,5,2)) # Datum16
self.setHoleDirection(fp)
self.oldCounterbore = False
self.oldCountersink = True
|
import logging
import sys
import time
from typing import Any, Callable, Optional
from django.conf import settings
from django.core.management.base import BaseCommand, CommandParser
from zerver.lib.rate_limiter import RateLimitedUser, \
client, max_api_calls, max_api_window
from zerver.models import get_user_profile_by_id
class Command(BaseCommand):
help = """Checks redis to make sure our rate limiting system hasn't grown a bug
and left redis with a bunch of data
Usage: ./manage.py [--trim] check_redis"""
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument('-t', '--trim',
dest='trim',
default=False,
action='store_true',
help="Actually trim excess")
def _check_within_range(self, key: str, count_func: Callable[[], int],
trim_func: Optional[Callable[[str, int], None]]=None) -> None:
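        # Keys match "ratelimit:*:*:list" / ":zset"; the second colon-separated
        # field is parsed as the user id below.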
user_id = int(key.split(':')[1])
user = get_user_profile_by_id(user_id)
entity = RateLimitedUser(user)
max_calls = max_api_calls(entity)
age = int(client.ttl(key))
if age < 0:
logging.error("Found key with age of %s, will never expire: %s" % (age, key,))
count = count_func()
if count > max_calls:
logging.error("Redis health check found key with more elements \
than max_api_calls! (trying to trim) %s %s" % (key, count))
if trim_func is not None:
client.expire(key, max_api_window(entity))
trim_func(key, max_calls)
def handle(self, *args: Any, **options: Any) -> None:
if not settings.RATE_LIMITING:
print("This machine is not using redis or rate limiting, aborting")
            sys.exit(1)
# Find all keys, and make sure they're all within size constraints
wildcard_list = "ratelimit:*:*:list"
wildcard_zset = "ratelimit:*:*:zset"
trim_func = lambda key, max_calls: client.ltrim(key, 0, max_calls - 1) # type: Optional[Callable[[str, int], None]]
if not options['trim']:
trim_func = None
lists = client.keys(wildcard_list)
for list_name in lists:
self._check_within_range(list_name,
lambda: client.llen(list_name),
trim_func)
zsets = client.keys(wildcard_zset)
for zset in zsets:
now = time.time()
# We can warn on our zset being too large, but we don't know what
# elements to trim. We'd have to go through every list item and take
# the intersection. The best we can do is expire it
self._check_within_range(zset,
lambda: client.zcount(zset, 0, now),
lambda key, max_calls: None)
|
#!/usr/bin/env python
"""
Reads a list of Z3 API header files and
generates the constant declarations needed
by one or more Z3 language bindings
"""
import mk_genfile_common
import argparse
import logging
import os
import sys
def main(args):
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("api_files", nargs="+")
parser.add_argument("--z3py-output-dir", dest="z3py_output_dir", default=None)
pargs = parser.parse_args(args)
if not mk_genfile_common.check_files_exist(pargs.api_files):
logging.error('One or more API files do not exist')
return 1
count = 0
if pargs.z3py_output_dir:
if not mk_genfile_common.check_dir_exists(pargs.z3py_output_dir):
return 1
output = mk_genfile_common.mk_z3consts_py_internal(pargs.api_files, pargs.z3py_output_dir)
logging.info('Generated "{}"'.format(output))
count += 1
if count == 0:
        logging.info('No files generated. You need to specify an output directory'
                     ' for the relevant language bindings')
# TODO: Add support for other bindings
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import re
import warnings
import inspect
import numpy as np
import matplotlib
import matplotlib.cbook as cbook
from matplotlib.cbook import mplDeprecation
from matplotlib import docstring, rcParams
from .transforms import (Bbox, IdentityTransform, TransformedBbox,
TransformedPath, Transform)
from .path import Path
# Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
# http://groups.google.com/groups?hl=en&lr=&threadm=mailman.5090.1098044946.5135.python-list%40python.org&rnum=1&prev=/groups%3Fq%3D__doc__%2Bauthor%253Ajdhunter%2540ace.bsd.uchicago.edu%26hl%3Den%26btnG%3DGoogle%2BSearch
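# For example (illustrative only), a setter documented in this convention
# looks like:
#
#     def set_foo(self, foo):
#         """
#         Set the foo of the artist.
#
#         ACCEPTS: [ True | False ]
#         """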
def allow_rasterization(draw):
"""
Decorator for Artist.draw method. Provides routines
that run before and after the draw call. The before and after functions
    are useful for changing artist-dependent renderer attributes or making
other setup function calls, such as starting and flushing a mixed-mode
renderer.
"""
def before(artist, renderer):
if artist.get_rasterized():
renderer.start_rasterizing()
if artist.get_agg_filter() is not None:
renderer.start_filter()
def after(artist, renderer):
if artist.get_agg_filter() is not None:
renderer.stop_filter(artist.get_agg_filter())
if artist.get_rasterized():
renderer.stop_rasterizing()
# the axes class has a second argument inframe for its draw method.
def draw_wrapper(artist, renderer, *args, **kwargs):
before(artist, renderer)
draw(artist, renderer, *args, **kwargs)
after(artist, renderer)
# "safe wrapping" to exactly replicate anything we haven't overridden above
draw_wrapper.__name__ = draw.__name__
draw_wrapper.__dict__ = draw.__dict__
draw_wrapper.__doc__ = draw.__doc__
draw_wrapper._supports_rasterization = True
return draw_wrapper
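# Illustrative use of the decorator (not part of this module): a subclass
# wraps its draw method so rasterizing/filtering is started and stopped
# around the actual drawing:
#
#     class MyArtist(Artist):
#         @allow_rasterization
#         def draw(self, renderer):
#             ...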
def _stale_axes_callback(self, val):
if self.axes:
self.axes.stale = val
class Artist(object):
"""
    Abstract base class for objects that render into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
def __init__(self):
self._stale = True
self.stale_callback = None
self._axes = None
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = None
self.clipbox = None
self._clippath = None
self._clipon = True
self._label = ''
self._picker = None
self._contains = None
self._rasterized = None
self._agg_filter = None
self._mouseover = False
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
try:
self.axes = None
except AttributeError:
# Handle self.axes as a read-only property, as in Figure.
pass
self._remove_method = None
self._url = None
self._gid = None
self._snap = None
self._sketch = rcParams['path.sketch']
self._path_effects = rcParams['path.effects']
def __getstate__(self):
d = self.__dict__.copy()
# remove the unpicklable remove method, this will get re-added on load
# (by the axes) if the artist lives on an axes.
d['_remove_method'] = None
d['stale_callback'] = None
return d
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should
# set the _remove_method attribute directly. This would be a
# protected attribute if Python supported that sort of thing. The
# callback has one parameter, which is the child to be removed.
if self._remove_method is not None:
self._remove_method(self)
# clear stale callback
self.stale_callback = None
_ax_flag = False
if hasattr(self, 'axes') and self.axes:
# remove from the mouse hit list
self.axes.mouseover_set.discard(self)
# mark the axes as stale
self.axes.stale = True
# decouple the artist from the axes
self.axes = None
_ax_flag = True
if self.figure:
self.figure = None
if not _ax_flag:
self.figure = True
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property of
# whether or not the artist should affect the limits. Then there will
# be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None:
return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
This has been deprecated in mpl 1.5, please use the
axes property. Will be removed in 1.7 or 2.0.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
warnings.warn(_get_axes_msg, mplDeprecation, stacklevel=1)
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*.
This has been deprecated in mpl 1.5, please use the
axes property. Will be removed in 1.7 or 2.0.
"""
warnings.warn(_get_axes_msg, mplDeprecation, stacklevel=1)
return self.axes
@property
def axes(self):
"""
The :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*.
"""
return self._axes
@axes.setter
def axes(self, new_axes):
if (new_axes is not None and
(self._axes is not None and new_axes != self._axes)):
raise ValueError("Can not reset the axes. You are "
"probably trying to re-use an artist "
"in more than one Axes which is not "
"supported")
self._axes = new_axes
if new_axes is not None and new_axes is not self:
self.stale_callback = _stale_axes_callback
return new_axes
@property
def stale(self):
"""
        Whether the artist is 'stale' and needs to be re-drawn for the output to
        match the internal state of the artist.
"""
return self._stale
@stale.setter
def stale(self, val):
self._stale = val
# if the artist is animated it does not take normal part in the
# draw stack and is not expected to be drawn as part of the normal
# draw loop (when not saving) so do not propagate this change
if self.get_animated():
return
if val and self.stale_callback is not None:
self.stale_callback(self, val)
def get_window_extent(self, renderer):
"""
Get the axes bounding box in display space.
Subclasses should override for inclusion in the bounding box
"tight" calculation. Default is to return an empty bounding
box at 0, 0.
        Be careful when using this function: the results will not update
        if the window extent of the artist changes. The extent
can change due to any changes in the transform stack, such as
changing the axes limits, the figure size, or the canvas used
(as is done when saving a figure). This can lead to unexpected
behavior where interactive figures will look fine on the screen,
but will save incorrectly.
"""
return Bbox([[0, 0], [0, 0]])
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try:
del self._propobservers[oid]
except KeyError:
pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in six.iteritems(self._propobservers):
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
self.stale = True
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
elif (not isinstance(self._transform, Transform)
and hasattr(self._transform, '_as_mpl_transform')):
self._transform = self._transform._as_mpl_transform(self.axes)
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
L = []
try:
hascursor, info = self.contains(event)
if hascursor:
L.append(self)
except:
import traceback
traceback.print_exc()
print("while checking", self.__class__)
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False, {}
def set_contains(self, picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if six.callable(picker):
inside, prop = picker(self, mouseevent)
else:
inside, prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
# make sure the event happened in the same axes
ax = getattr(a, 'axes', None)
if mouseevent.inaxes is None or ax is None or \
mouseevent.inaxes == ax:
# we need to check if mouseevent.inaxes is None
# because some objects associated with an axes (e.g., a
# tick label) can be outside the bounding box of the
# axes and inaxes will be None
# also check that ax is None so that it traverse objects
# which do no have an axes property but children might
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
            off an event if its data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g., the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
            to determine the hit test. If the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
ACCEPTS: a url string
"""
self._url = url
def get_gid(self):
"""
Returns the group id
"""
return self._gid
def set_gid(self, gid):
"""
Sets the (group) id for the artist
ACCEPTS: an id string
"""
self._gid = gid
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
if rcParams['path.snap']:
return self._snap
else:
return False
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
self._snap = snap
self.stale = True
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
self.stale = True
def set_path_effects(self, path_effects):
"""
set path_effects, which should be a list of instances of
        matplotlib.patheffects._Base class or its derivatives.
"""
self._path_effects = path_effects
self.stale = True
def get_path_effects(self):
return self._path_effects
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
# if this is a no-op just return
if self.figure is fig:
return
# if we currently have a figure (the case of both `self.figure`
        # and `fig` being None is taken care of above), then the user is
        # trying to change the figure an artist is associated with, which
# is not allowed for the same reason as adding the same instance
# to more than one Axes
if self.figure is not None:
raise RuntimeError("Can not put single artist in "
"more than one figure")
self.figure = fig
if self.figure and self.figure is not self:
self.pchanged()
self.stale = True
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
self.stale = True
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from matplotlib.patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(),
path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPath(
path.get_path(),
path.get_transform())
success = True
elif isinstance(path, tuple):
path, transform = path
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
elif isinstance(path, TransformedPath):
self._clippath = path
success = True
if not success:
print(type(path), type(transform))
raise TypeError("Invalid arguments to set_clip_path")
        # this may result in the callbacks being hit twice, but guarantees they
# will be hit at least once
self.pchanged()
self.stale = True
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
        When False, artists will be visible outside of the axes, which
can lead to unexpected results.
ACCEPTS: [True | False]
"""
self._clipon = b
# This may result in the callbacks being hit twice, but ensures they
# are hit at least once
self.pchanged()
self.stale = True
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def get_rasterized(self):
"return True if the artist is to be rasterized"
return self._rasterized
def set_rasterized(self, rasterized):
"""
Force rasterized (bitmap) drawing in vector backend output.
Defaults to None, which implies the backend's default behavior
ACCEPTS: [True | False | None]
"""
if rasterized and not hasattr(self.draw, "_supports_rasterization"):
warnings.warn("Rasterization of '%s' will be ignored" % self)
self._rasterized = rasterized
def get_agg_filter(self):
"return filter function to be used for agg filter"
return self._agg_filter
def set_agg_filter(self, filter_func):
"""
        Set the agg_filter function.
"""
self._agg_filter = filter_func
self.stale = True
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible():
return
self.stale = False
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends.
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
self.stale = True
def set_visible(self, b):
"""
        Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
self.stale = True
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
if self._animated != b:
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
        dictionary *props*.
"""
store = self.eventson
self.eventson = False
changed = False
for k, v in six.iteritems(props):
if k in ['axes']:
setattr(self, k, v)
else:
func = getattr(self, 'set_' + k, None)
if func is None or not six.callable(func):
raise AttributeError('Unknown property %s' % k)
func(v)
changed = True
self.eventson = store
if changed:
self.pchanged()
self.stale = True
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: string or anything printable with '%s' conversion.
"""
if s is not None:
self._label = '%s' % (s, )
else:
self._label = None
self.pchanged()
self.stale = True
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
self.stale = True
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._label = other._label
self._sketch = other._sketch
self._path_effects = other._path_effects
self.pchanged()
self.stale = True
def properties(self):
"""
return a dictionary mapping property name -> value for all Artist props
"""
return ArtistInspector(self).properties()
def set(self, **kwargs):
"""
A property batch setter. Pass *kwargs* to set properties.
Will handle property name collisions (e.g., if both
'color' and 'facecolor' are specified, the property
with higher priority gets set last).
"""
ret = []
for k, v in sorted(kwargs.items(), reverse=True):
k = k.lower()
funcName = "set_%s" % k
func = getattr(self, funcName, None)
if func is None:
raise TypeError('There is no %s property "%s"' %
(self.__class__.__name__, k))
ret.extend([func(v)])
return ret
def findobj(self, match=None, include_self=True):
"""
Find artist objects.
Recursively find all :class:`~matplotlib.artist.Artist` instances
contained in self.
*match* can be
- None: return all objects contained in artist.
- function with signature ``boolean = match(artist)``
used to filter matches
- class instance: e.g., Line2D. Only return artists of class type.
If *include_self* is True (default), include self in the list to be
checked for a match.
"""
if match is None: # always return True
def matchfunc(x):
return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif six.callable(match):
matchfunc = match
else:
raise ValueError('match must be None, a matplotlib.artist.Artist '
'subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in
c.findobj(matchfunc, include_self=False)
if matchfunc(thisc)])
if include_self and matchfunc(self):
artists.append(self)
return artists
def get_cursor_data(self, event):
"""
Get the cursor data for a given event.
"""
return None
def format_cursor_data(self, data):
"""
Return *cursor data* string formatted.
"""
try:
data[0]
except (TypeError, IndexError):
data = [data]
return ', '.join('{:0.3g}'.format(item) for item in data if
isinstance(item, (np.floating, np.integer, int, float)))
@property
def mouseover(self):
return self._mouseover
@mouseover.setter
def mouseover(self, val):
val = bool(val)
self._mouseover = val
ax = self.axes
if ax:
if val:
ax.mouseover_set.add(self)
else:
ax.mouseover_set.discard(self)
class ArtistInspector(object):
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
    and return information about its settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of :class:`Artists`.
If a sequence is used, we assume it is a homogeneous sequence (all
:class:`Artists` are of the same type) and it is your responsibility
to make sure this is so.
"""
if cbook.iterable(o) and len(o):
o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
e.g., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and six.callable(getattr(self.o, name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func):
continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(
r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
)
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
e.g., for a line linestyle, return
"[ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'steps'`` | ``'None'``
]"
"""
name = 'set_%s' % attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s' % (self.o, name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None:
return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return match.group(1).replace('\n', ' ')
return 'unknown'
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'):
continue
o = getattr(self.o, name)
if not six.callable(o):
continue
if six.PY2:
nargs = len(inspect.getargspec(o)[0])
else:
nargs = len(inspect.getfullargspec(o)[0])
if nargs < 2:
continue
func = o
if self.is_alias(func):
continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. e.g., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None:
return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x
for x
in six.iterkeys(self.aliasd[s])])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x
for x
in six.iterkeys(self.aliasd[s])])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target)
for prop, target
in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '=' * col0_len + ' ' + '=' * col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len + 3) +
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len + 3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def properties(self):
"""
return a dictionary mapping property name -> value
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and six.callable(getattr(o, name))]
getters.sort()
d = dict()
for name in getters:
func = getattr(o, name)
if self.is_alias(func):
continue
try:
val = func()
except:
continue
else:
d[name[4:]] = val
return d
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
d = self.properties()
names = list(six.iterkeys(d))
names.sort()
lines = []
for name in names:
val = d[name]
if getattr(val, 'shape', ()) != () and len(val) > 6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s) > 50:
s = s[:50] + '...'
name = self.aliased_name(name)
lines.append(' %s = %s' % (name, s))
return lines
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: e.g., :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x):
return True
elif issubclass(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif six.callable(match):
            matchfunc = match
else:
raise ValueError('match must be None, an '
'matplotlib.artist.Artist '
'subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc
for thisc
in c.findobj(matchfunc)
if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
def getp(obj, property=None):
"""
Return the value of object's property. *property* is an optional string
for the property you want to return
Example usage::
getp(obj) # get all the object properties
getp(obj, 'linestyle') # get the linestyle property
*obj* is a :class:`Artist` instance, e.g.,
    :class:`~matplotlib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
obj.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(obj)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
if property is None:
insp = ArtistInspector(obj)
ret = insp.pprint_getters()
print('\n'.join(ret))
return
func = getattr(obj, 'get_' + property)
return func()
# alias
get = getp
def setp(obj, *args, **kwargs):
"""
Set a property on an artist object.
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. e.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the MATLAB style string/value pairs or
with python kwargs. For example, the following are equivalent::
>>> setp(lines, 'linewidth', 2, 'color', 'r') # MATLAB style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(obj)
if len(kwargs) == 0 and len(args) == 0:
print('\n'.join(insp.pprint_setters()))
return
if len(kwargs) == 0 and len(args) == 1:
print(insp.pprint_setters(prop=args[0]))
return
if not cbook.iterable(obj):
objs = [obj]
else:
objs = cbook.flatten(obj)
if len(args) % 2:
raise ValueError('The set args must be string, value pairs')
funcvals = []
for i in range(0, len(args) - 1, 2):
funcvals.append((args[i], args[i + 1]))
funcvals.extend(sorted(kwargs.items(), reverse=True))
ret = []
for o in objs:
for s, val in funcvals:
s = s.lower()
funcName = "set_%s" % s
func = getattr(o, funcName, None)
if func is None:
raise TypeError('There is no %s property "%s"' %
(o.__class__.__name__, s))
ret.extend([func(val)])
return [x for x in cbook.flatten(ret)]
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(
leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
docstring.interpd.update(Artist=kwdoc(Artist))
_get_axes_msg = """This has been deprecated in mpl 1.5, please use the
axes property. A removal date has not been set."""
|
from iocbuilder import AutoSubstitution, SetSimulation, Device, Architecture, ModuleBase
from iocbuilder.arginfo import *
from iocbuilder.modules.motor import basic_asyn_motor, MotorRecord
from iocbuilder.modules.tpmac import DeltaTau, DeltaTauCommsPort
from iocbuilder.modules.asyn import Asyn, AsynPort
from iocbuilder.modules.calc import Calc
from iocbuilder.modules.busy import Busy
from iocbuilder.modules.seq import Seq
from iocbuilder.modules.genSub import GenSub
from iocbuilder.modules.streamDevice import AutoProtocol
import os, sys
class PmacUtil(Device):
Dependencies = (GenSub,Seq)
LibFileList = ["pmacUtil"]
DbdFileList = ["pmacUtilSupport"]
AutoInstantiate = True
class autohome(AutoSubstitution):
Dependencies = (Calc,)
TemplateFile = 'autohome.template'
autohome.ArgInfo.descriptions["PORT"] = Ident("Delta tau motor controller comms port", DeltaTauCommsPort)
def add_basic(cls):
"""Convenience function to add basic_asyn_motor attributes to a class that
includes it via an msi include statement rather than verbatim"""
cls.Arguments = basic_asyn_motor.Arguments + [x for x in cls.Arguments if x not in basic_asyn_motor.Arguments]
cls.ArgInfo = basic_asyn_motor.ArgInfo + cls.ArgInfo.filtered(without=basic_asyn_motor.ArgInfo.Names())
cls.Defaults.update(basic_asyn_motor.Defaults)
cls.guiTags = basic_asyn_motor.guiTags
return cls
class eloss_kill_autohome_records(AutoSubstitution):
WarnMacros = False
TemplateFile = "eloss_kill_autohome_records.template"
def add_eloss_kill_autohome(cls):
"""Convenience function to add eloss_kill_autohome_records attributes to a class that
includes it via an msi include statement rather than verbatim"""
cls.Arguments = eloss_kill_autohome_records.Arguments + [x for x in cls.Arguments if x not in eloss_kill_autohome_records.Arguments]
cls.ArgInfo = eloss_kill_autohome_records.ArgInfo + cls.ArgInfo.filtered(without=eloss_kill_autohome_records.ArgInfo.Names())
cls.Defaults.update(eloss_kill_autohome_records.Defaults)
cls.guiTags = eloss_kill_autohome_records.guiTags
return cls
@add_basic
@add_eloss_kill_autohome
class dls_pmac_asyn_motor_no_coord(AutoSubstitution, AutoProtocol, MotorRecord):
WarnMacros = False
TemplateFile = 'dls_pmac_asyn_motor_no_coord.template'
ProtocolFiles = ['pmac.proto']
Dependencies = (Busy,)
dls_pmac_asyn_motor_no_coord.ArgInfo.descriptions["PORT"] = Ident("Delta tau motor controller", DeltaTau)
dls_pmac_asyn_motor_no_coord.ArgInfo.descriptions["SPORT"] = Ident("Delta tau motor controller comms port", DeltaTauCommsPort)
@add_basic
@add_eloss_kill_autohome
class dls_pmac_patch_asyn_motor(AutoSubstitution, AutoProtocol, MotorRecord):
WarnMacros = False
TemplateFile = 'dls_pmac_patch_asyn_motor.template'
ProtocolFiles = ['pmac.proto']
Dependencies = (Busy,)
dls_pmac_patch_asyn_motor.ArgInfo.descriptions["PORT"] = Ident("Delta tau motor controller", DeltaTau)
dls_pmac_patch_asyn_motor.ArgInfo.descriptions["SPORT"] = Ident("Delta tau motor controller comms port", DeltaTauCommsPort)
try:
from iocbuilder.modules.pmacCoord import PmacCoord, CS
@add_basic
@add_eloss_kill_autohome
class dls_pmac_asyn_motor(AutoSubstitution, AutoProtocol, MotorRecord):
WarnMacros = False
TemplateFile = 'dls_pmac_asyn_motor.template'
ProtocolFiles = ['pmac.proto']
Dependencies = (Busy,PmacCoord)
dls_pmac_asyn_motor.ArgInfo.descriptions["PORT"] = Ident("Delta tau motor controller", DeltaTau)
dls_pmac_asyn_motor.ArgInfo.descriptions["SPORT"] = Ident("Delta tau motor controller comms port", DeltaTauCommsPort)
@add_basic
class dls_pmac_cs_asyn_motor(AutoSubstitution):
WarnMacros = False
TemplateFile = 'dls_pmac_cs_asyn_motor.template'
Dependencies = (Busy,)
dls_pmac_cs_asyn_motor.ArgInfo.descriptions["PORT"] = Ident("Delta tau motor CS", CS)
except ImportError:
print "# pmacCoord not found, dls_pmac_asyn_motor will not be available"
class _pmacStatusAxis(AutoSubstitution, AutoProtocol):
ProtocolFiles = ['pmac.proto']
TemplateFile = 'pmacStatusAxis.template'
class pmacStatus(AutoSubstitution, AutoProtocol):
Dependencies = (PmacUtil,)
ProtocolFiles = ['pmac.proto']
TemplateFile = 'pmacStatus.template'
def __init__(self, **args):
# init the super class
self.__super.__init__(**args)
self.axes = []
NAXES = int(args["NAXES"])
assert NAXES in range(1,33), "Number of axes (%d) must be in range 1..32" % NAXES
# for each axis
for i in range(1, NAXES + 1):
args["AXIS"] = i
# make a _pmacStatusAxis instance
self.axes.append(
_pmacStatusAxis(
**filter_dict(args, _pmacStatusAxis.ArgInfo.Names())))
pmacStatus.ArgInfo.descriptions["PORT"] = Ident("Delta tau motor controller comms port", DeltaTauCommsPort)
class gather(AutoSubstitution, Device):
'''Setup PMAC or Geobrick gathering template'''
Dependencies = (PmacUtil,)
def PostIocInitialise(self):
if Architecture() == "linux-x86":
print 'seq(gather,"P=%(P)s,M=%(M)s")' % self.args
else:
print 'seq &gather,"P=%(P)s,M=%(M)s"' % self.args
# Substitution attributes
TemplateFile = 'gather.template'
class positionCompare(AutoSubstitution, AutoProtocol):
'''Setup position compare on a delta tau. Needs PLC_position_compare.pmc'''
Dependencies = (Calc,)
ProtocolFiles = ['pmac.proto']
TemplateFile = 'positionCompare.template'
class positionCompare_nojitter(AutoSubstitution, AutoProtocol):
'''Setup position compare on a delta tau. Needs
PLC_position_compare_nojitter.pmc'''
Dependencies = (Calc,)
ProtocolFiles = ['pmac.proto']
TemplateFile = 'positionCompare_nojitter.template'
class pmacVariableWrite(AutoSubstitution):
'''Couple of records to write variables to a Delta tau'''
Dependencies = (Calc, Asyn)
TemplateFile = 'pmacVariableWrite.template'
class pmacDeferMoves(AutoSubstitution):
TemplateFile = 'pmacDeferMoves.template'
|
"""
Plotting utilities.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.constants import c
from matplotlib.artist import Artist
from matplotlib.colors import LinearSegmentedColormap
import astropy.units as u
import numpy as np
__all__ = ['get_nrows_ncols', 'truncate_colormap', 'GrowFilter',
'FilteredArtistList']
c_kms = c.to(u.km / u.s).value # speed of light in km/s
def get_nrows_ncols(npar):
"""
Optimises the subplot grid layout.
Parameters
----------
npar : int
Number of subplots.
Returns
-------
nrows : int
Number of figure rows.
ncols : int
Number of figure columns.
"""
ncols = max(int(np.sqrt(npar)), 1)
nrows = ncols
while npar > (nrows * ncols):
nrows += 1
return nrows, ncols
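# A minimal usage sketch (not part of the original module): for 10 subplots the
# grid grows rows first, giving a 4 x 3 layout.
#     >>> get_nrows_ncols(10)
#     (4, 3)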
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
"""
Truncates a colourmap.
Parameters
----------
cmap : `matplotlib.colors.LinearSegmentedColormap`
Input colourmap.
minval, maxval : float
Interval to sample (minval >= 0, maxval <= 1)
n : int
Sampling density.
Returns
-------
new_cmap : `matplotlib.colors.LinearSegmentedColormap`
Truncated colourmap.
"""
new_cmap = LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(
n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
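# Hedged example (the 'viridis' name is an assumption, not taken from this module):
# keep only the upper 40% of an existing colourmap.
#     >>> import matplotlib.pyplot as plt
#     >>> sub_cmap = truncate_colormap(plt.get_cmap('viridis'), minval=0.6, maxval=1.0)
#     >>> sub_cmap.name
#     'trunc(viridis,0.60,1.00)'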
def smooth1d(x, window_len):
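    """Smooth a 1-D array with a Hanning window of length ``window_len``,
    mirroring the ends so the result has the same length as ``x``."""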
s = np.r_[2 * x[0] - x[window_len:1:-1], x,
2 * x[-1] - x[-1:-window_len:-1]]
w = np.hanning(window_len)
y = np.convolve(w / w.sum(), s, mode='same')
return y[window_len - 1:-window_len + 1]
def smooth2d(a, sigma=3):
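    """Smooth a 2-D array by applying ``smooth1d`` along both axes, using a
    window length of ``2 * max(int(sigma), 3) + 1`` samples."""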
window_len = max(int(sigma), 3) * 2 + 1
a1 = np.array([smooth1d(x, window_len) for x in np.asarray(a)])
a2 = np.transpose(a1)
a3 = np.array([smooth1d(x, window_len) for x in a2])
a4 = np.transpose(a3)
return a4
class BaseFilter(object):
@staticmethod
def prepare_image(src_image, pad):
ny, nx, depth = src_image.shape
padded_src = np.zeros([pad * 2 + ny, pad * 2 + nx, depth], dtype='d')
padded_src[pad:-pad, pad:-pad, :] = src_image[:, :, :]
return padded_src
@staticmethod
def get_pad():
return 0
def __call__(self, im, dpi):
pad = self.get_pad()
padded_src = self.prepare_image(im, pad)
tgt_image = self.process_image(padded_src, dpi)
return tgt_image, -pad, -pad
class GrowFilter(BaseFilter):
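    """Pad the drawn image by ``pixels``, blur its alpha channel so the shape
    "grows", and fill the colour channels with ``color`` (white by default)."""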
def __init__(self, pixels, color=None):
self.pixels = pixels
if color is None:
self.color = (1, 1, 1)
else:
self.color = color
def __call__(self, im, dpi):
pad = self.pixels
ny, nx, depth = im.shape
new_im = np.empty([pad * 2 + ny, pad * 2 + nx, depth], dtype='d')
alpha = new_im[:, :, 3]
alpha.fill(0)
alpha[pad:-pad, pad:-pad] = im[:, :, -1]
alpha2 = np.clip(
smooth2d(alpha, int(self.pixels / 72. * dpi)) * 5, 0, 1)
new_im[:, :, -1] = alpha2
new_im[:, :, :-1] = self.color
offsetx, offsety = -pad, -pad
return new_im, offsetx, offsety
class FilteredArtistList(Artist):
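    """Container that draws a list of artists as one rasterized group and then
    passes the result through the given agg filter."""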
def __init__(self, artist_list, filter0):
self._artist_list = artist_list
self._filter = filter0
super(FilteredArtistList, self).__init__()
def draw(self, renderer, *args, **kwargs):
renderer.start_rasterizing()
renderer.start_filter()
for a in self._artist_list:
a.draw(renderer, *args, **kwargs)
renderer.stop_filter(self._filter)
renderer.stop_rasterizing()
|
from direct.showbase import DirectObject
from otp.otpbase import OTPGlobals
import sys
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from otp.otpbase import OTPLocalizer
class ChatInputNormal(DirectObject.DirectObject):
ExecNamespace = None
def __init__(self, chatMgr):
self.chatMgr = chatMgr
self.normalPos = Vec3(-1.083, 0, 0.804)
self.whisperPos = Vec3(0.0, 0, 0.71)
self.whisperAvatarName = None
self.whisperAvatarId = None
self.toPlayer = 0
wantHistory = 0
if __dev__:
wantHistory = 1
self.wantHistory = config.GetBool('want-chat-history', wantHistory)
self.history = ['']
self.historySize = config.GetInt('chat-history-size', 10)
self.historyIndex = 0
return
def typeCallback(self, extraArgs):
messenger.send('enterNormalChat')
def delete(self):
self.ignore('arrow_up-up')
self.ignore('arrow_down-up')
self.chatFrame.destroy()
del self.chatFrame
del self.chatButton
del self.cancelButton
del self.chatEntry
del self.whisperLabel
del self.chatMgr
def activateByData(self, whisperAvatarId = None, toPlayer = 0):
self.toPlayer = toPlayer
self.whisperAvatarId = whisperAvatarId
if self.whisperAvatarId:
self.whisperAvatarName = base.talkAssistant.findName(self.whisperAvatarId, self.toPlayer)
self.chatFrame.setPos(self.whisperPos)
self.whisperLabel['text'] = OTPLocalizer.ChatInputWhisperLabel % self.whisperAvatarName
self.whisperLabel.show()
else:
self.chatFrame.setPos(self.normalPos)
self.whisperLabel.hide()
self.chatEntry['focus'] = 1
self.chatFrame.show()
if self.wantHistory:
self.accept('arrow_up-up', self.getPrevHistory)
self.accept('arrow_down-up', self.getNextHistory)
return True
def deactivate(self):
self.chatEntry.set('')
self.chatEntry['focus'] = 0
self.chatFrame.hide()
self.whisperLabel.hide()
base.win.closeIme()
self.ignore('arrow_up-up')
self.ignore('arrow_down-up')
def checkForOverRide(self):
return False
def sendChat(self, text):
if self.checkForOverRide():
self.chatEntry.enterText('')
return
self.deactivate()
self.chatMgr.fsm.request('mainMenu')
if text:
if self.toPlayer:
if self.whisperAvatarId:
self.whisperAvatarName = None
self.whisperAvatarId = None
self.toPlayer = 0
elif self.whisperAvatarId:
self.chatMgr.sendWhisperString(text, self.whisperAvatarId)
self.whisperAvatarName = None
self.whisperAvatarId = None
else:
if self.chatMgr.execChat:
if text[0] == '>':
text = self.__execMessage(text[1:])
base.localAvatar.setChatAbsolute(text, CFSpeech | CFTimeout)
return
base.talkAssistant.sendOpenTalk(text)
if self.wantHistory:
self.addToHistory(text)
return
def chatOverflow(self, overflowText):
self.sendChat(self.chatEntry.get())
def __execMessage(self, message):
if not ChatInputNormal.ExecNamespace:
ChatInputNormal.ExecNamespace = {}
exec 'from pandac.PandaModules import *' in globals(), self.ExecNamespace
self.importExecNamespace()
try:
return str(eval(message, globals(), ChatInputNormal.ExecNamespace))
except SyntaxError:
try:
exec message in globals(), ChatInputNormal.ExecNamespace
return 'ok'
except:
exception = sys.exc_info()[0]
extraInfo = sys.exc_info()[1]
if extraInfo:
return str(extraInfo)
else:
return str(exception)
except:
exception = sys.exc_info()[0]
extraInfo = sys.exc_info()[1]
if extraInfo:
return str(extraInfo)
else:
return str(exception)
def cancelButtonPressed(self):
self.chatEntry.set('')
self.chatMgr.fsm.request('mainMenu')
def chatButtonPressed(self):
self.sendChat(self.chatEntry.get())
def importExecNamespace(self):
pass
def addToHistory(self, text):
self.history = [text] + self.history[:self.historySize - 1]
self.historyIndex = 0
def getPrevHistory(self):
self.chatEntry.set(self.history[self.historyIndex])
self.historyIndex += 1
self.historyIndex %= len(self.history)
def getNextHistory(self):
self.chatEntry.set(self.history[self.historyIndex])
self.historyIndex -= 1
self.historyIndex %= len(self.history)
def setPos(self, posX, posY = None, posZ = None):
if posX and posY and posZ:
self.chatFrame.setPos(posX, posY, posZ)
else:
self.chatFrame.setPos(posX)
|
# -*- coding: utf-8 -*-
from io import open
import os
import os.path
# Count how many lines a text file has
def line_count(filepath, enc='utf8'):
count = 0
with open(filepath, mode='r', encoding=enc) as fin:
for line in fin:
            if line.strip(): # blank lines are not counted
count += 1
return count
# Determine the text encoding from the first few bytes of the file
def what_encoding_by_head(filepath):
enc = None
with open(filepath, 'rb') as fin:
data = fin.read(3)
        if len(data) == 3: # Windows prepends a BOM to UTF-8 files
if data[0] == 0xEF and data[1] == 0xBB and data[2] == 0xBF:
enc = 'utf8'
        if len(data) >= 2: # UTF-16 BE and LE
if data[0] == 0xFE and data[1] == 0xFF:
enc = 'UTF-16BE'
elif data[0] == 0xFF and data[1] == 0xFE:
enc = 'UTF-16LE'
return enc
# Determine the text encoding from the first or second line, if it contains a 'coding' marker
def what_encoding_by_mark(filepath):
    enc = 'ascii' # default is ASCII
    mark = 'coding' # the encoding marker string
    def extract_enc(line):
        chars = ' :=-*' # useless characters around the encoding marker
return line.strip().lstrip(chars).rstrip(chars)
with open(filepath, mode='r', encoding=enc, errors='ignore') as fin:
        for i in range(2): # read only the first two lines
line = fin.readline()
            idx = line.find(mark) # if the line contains the encoding marker
if idx != -1:
enc = extract_enc(line[idx+len(mark):])
break
return enc
# Determine the text encoding
def what_encoding(filepath):
return what_encoding_by_head(filepath) or what_encoding_by_mark(filepath)
lc_total = 0 # total number of lines
file_count = 0 # number of files whose lines were counted
for p in os.listdir('.'): # in Python 2.x, use u'.' instead
if os.path.isfile(p) and p[-3:] == '.py':
try:
enc = what_encoding(p)
lc = line_count(p, enc)
lc_total += lc
file_count += 1
print(p, enc, lc)
except:
print(p, "can't count lines")
print('%d .py files totally have %d lines ' % (file_count, lc_total))
|
# Copyright (C) 2009 James Newton
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import types
import urwid
from xmppcli import log
class Tabs(urwid.Widget):
def __init__(self, widgets, status):
urwid.Widget.__init__(self)
self.widgets = widgets
self.status = status
self.selected = 0
self.setStatusName()
def append(self, widget):
self.widgets.append(widget)
self.setStatusName()
def prepend(self, widget):
        self.widgets.insert(0, widget)
self.setStatusName()
def remove(self, widget):
for index in range(len(self.widgets)):
if self.widgets[index] == widget:
del self.widgets[index]
break
self.setStatusName()
def cur(self):
return self.widgets[self.selected]
def next(self):
self.selected = self.selected + 1
if self.selected >= len(self.widgets):
self.selected = 0
self.setStatusName()
self._invalidate()
def prev(self):
self.selected = self.selected - 1
if self.selected < 0:
self.selected = len(self.widgets) - 1
self.setStatusName()
self._invalidate()
def setStatusName(self):
names = []
for idx in range(len(self.widgets)):
if idx == self.selected:
color = 'status-hilite'
else:
color = 'status'
names.append((color, '[' + self.widgets[idx].name + ']'))
names.append(" ")
self.status.setBuffName(names)
def select(self, index):
if type(index) is types.IntType:
self.selected = index
else:
for windex in range(len(self.widgets)):
if self.widgets[windex] is index:
self.selected = windex
break
else:
raise Exception("Invalid selection %s" % str(index))
self._invalidate()
def __getattr__(self, name):
return getattr(self.cur(), name)
|
# This file is part of export2hdf5
#
# Copyright 2016
# Andreas Henelius <[email protected]>,
# Finnish Institute of Occupational Health
#
# This code is released under the MIT License
# http://opensource.org/licenses/mit-license.php
#
# Please see the file LICENSE for details.
"""
This module contains some convenience functions for
handling datasets (lists containing the data from a
recording using some device).
"""
def get_channels_in_set(dataset):
"""
Return list of all channels in the dataset.
The dataset is a list, where each element
represents a channel. Each channel is
a dictionary of the form
{"meta" : <dict with metadata>,
"data" : {"time" : [...], "<channelname" : [...] }}
"""
if not isinstance(dataset, list):
dataset = [dataset]
channels = []
for ch_data in dataset:
for ch_name in ch_data["data"].keys():
if ch_name not in channels:
channels += [ch_name]
ind = channels.index("time")
del channels[ind]
return channels
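# A minimal usage sketch (the channel names below are invented for illustration;
# the ordering of the returned list follows dict iteration order):
#     >>> dataset = [{"meta": {}, "data": {"time": [0, 1], "ecg": [0.1, 0.2]}},
#     ...            {"meta": {}, "data": {"time": [0, 1], "eda": [3.0, 3.1]}}]
#     >>> get_channels_in_set(dataset)
#     ['ecg', 'eda']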
def read_text(fname):
"""
Read text data from a file.
Arguments:
- fname : the name of the file containing the data
Returns:
- a dictionary with the data
{"text" : <the text data as a string> }
"""
with open(fname, "r") as file:
data = file.read()
return {'text' : data}
|
from __future__ import print_function
import os
import gzip
import logging
from six.moves import cPickle as pickle
from importlib import import_module
from time import time
from weakref import WeakKeyDictionary
from email.utils import mktime_tz, parsedate_tz
from w3lib.http import headers_raw_to_dict, headers_dict_to_raw
from scrapy.http import Headers, Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.request import request_fingerprint
from scrapy.utils.project import data_path
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes, to_unicode
logger = logging.getLogger(__name__)
class DummyPolicy(object):
def __init__(self, settings):
self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES')
self.ignore_http_codes = [int(x) for x in settings.getlist('HTTPCACHE_IGNORE_HTTP_CODES')]
def should_cache_request(self, request):
return urlparse_cached(request).scheme not in self.ignore_schemes
def should_cache_response(self, response, request):
return response.status not in self.ignore_http_codes
def is_cached_response_fresh(self, response, request):
return True
def is_cached_response_valid(self, cachedresponse, response, request):
return True
class RFC2616Policy(object):
MAXAGE = 3600 * 24 * 365 # one year
def __init__(self, settings):
self.always_store = settings.getbool('HTTPCACHE_ALWAYS_STORE')
self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES')
self.ignore_response_cache_controls = [to_bytes(cc) for cc in
settings.getlist('HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS')]
self._cc_parsed = WeakKeyDictionary()
def _parse_cachecontrol(self, r):
if r not in self._cc_parsed:
cch = r.headers.get(b'Cache-Control', b'')
parsed = parse_cachecontrol(cch)
if isinstance(r, Response):
for key in self.ignore_response_cache_controls:
parsed.pop(key, None)
self._cc_parsed[r] = parsed
return self._cc_parsed[r]
def should_cache_request(self, request):
if urlparse_cached(request).scheme in self.ignore_schemes:
return False
cc = self._parse_cachecontrol(request)
# obey user-agent directive "Cache-Control: no-store"
if b'no-store' in cc:
return False
# Any other is eligible for caching
return True
def should_cache_response(self, response, request):
# What is cacheable - http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec14.9.1
# Response cacheability - http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.4
        # Status code 206 is not included because the cache cannot deal with partial content
cc = self._parse_cachecontrol(response)
# obey directive "Cache-Control: no-store"
if b'no-store' in cc:
return False
# Never cache 304 (Not Modified) responses
elif response.status == 304:
return False
# Cache unconditionally if configured to do so
elif self.always_store:
return True
# Any hint on response expiration is good
elif b'max-age' in cc or b'Expires' in response.headers:
return True
        # Firefox falls back to a one-year expiration for these statuses if none is set
elif response.status in (300, 301, 308):
return True
        # Other statuses without expiration require at least one validator
elif response.status in (200, 203, 401):
return b'Last-Modified' in response.headers or b'ETag' in response.headers
        # Anything else is probably not eligible for caching
        # It makes no sense to cache responses that do not contain expiration
        # info and cannot be revalidated
else:
return False
def is_cached_response_fresh(self, cachedresponse, request):
cc = self._parse_cachecontrol(cachedresponse)
ccreq = self._parse_cachecontrol(request)
if b'no-cache' in cc or b'no-cache' in ccreq:
return False
now = time()
freshnesslifetime = self._compute_freshness_lifetime(cachedresponse, request, now)
currentage = self._compute_current_age(cachedresponse, request, now)
reqmaxage = self._get_max_age(ccreq)
if reqmaxage is not None:
freshnesslifetime = min(freshnesslifetime, reqmaxage)
if currentage < freshnesslifetime:
return True
if b'max-stale' in ccreq and b'must-revalidate' not in cc:
# From RFC2616: "Indicates that the client is willing to
# accept a response that has exceeded its expiration time.
# If max-stale is assigned a value, then the client is
# willing to accept a response that has exceeded its
# expiration time by no more than the specified number of
# seconds. If no value is assigned to max-stale, then the
# client is willing to accept a stale response of any age."
staleage = ccreq[b'max-stale']
if staleage is None:
return True
try:
if currentage < freshnesslifetime + max(0, int(staleage)):
return True
except ValueError:
pass
# Cached response is stale, try to set validators if any
self._set_conditional_validators(request, cachedresponse)
return False
def is_cached_response_valid(self, cachedresponse, response, request):
# Use the cached response if the new response is a server error,
# as long as the old response didn't specify must-revalidate.
if response.status >= 500:
cc = self._parse_cachecontrol(cachedresponse)
if b'must-revalidate' not in cc:
return True
# Use the cached response if the server says it hasn't changed.
return response.status == 304
def _set_conditional_validators(self, request, cachedresponse):
if b'Last-Modified' in cachedresponse.headers:
request.headers[b'If-Modified-Since'] = cachedresponse.headers[b'Last-Modified']
if b'ETag' in cachedresponse.headers:
request.headers[b'If-None-Match'] = cachedresponse.headers[b'ETag']
def _get_max_age(self, cc):
try:
return max(0, int(cc[b'max-age']))
except (KeyError, ValueError):
return None
def _compute_freshness_lifetime(self, response, request, now):
# Reference nsHttpResponseHead::ComputeFreshnessLifetime
# http://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#410
cc = self._parse_cachecontrol(response)
maxage = self._get_max_age(cc)
if maxage is not None:
return maxage
# Parse date header or synthesize it if none exists
date = rfc1123_to_epoch(response.headers.get(b'Date')) or now
# Try HTTP/1.0 Expires header
if b'Expires' in response.headers:
expires = rfc1123_to_epoch(response.headers[b'Expires'])
            # When parsing the Expires header fails, RFC 2616 section 14.21 says we
            # should treat it as an expiration time in the past.
return max(0, expires - date) if expires else 0
        # Fall back to a heuristic using the Last-Modified header
        # This is not in the RFC but mirrors Firefox's caching implementation
lastmodified = rfc1123_to_epoch(response.headers.get(b'Last-Modified'))
if lastmodified and lastmodified <= date:
return (date - lastmodified) / 10
# This request can be cached indefinitely
if response.status in (300, 301, 308):
return self.MAXAGE
        # Insufficient information to compute freshness lifetime
return 0
def _compute_current_age(self, response, request, now):
# Reference nsHttpResponseHead::ComputeCurrentAge
# http://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#366
currentage = 0
# If Date header is not set we assume it is a fast connection, and
# clock is in sync with the server
date = rfc1123_to_epoch(response.headers.get(b'Date')) or now
if now > date:
currentage = now - date
if b'Age' in response.headers:
try:
age = int(response.headers[b'Age'])
currentage = max(currentage, age)
except ValueError:
pass
return currentage
class DbmCacheStorage(object):
def __init__(self, settings):
self.cachedir = data_path(settings['HTTPCACHE_DIR'], createdir=True)
self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
self.dbmodule = import_module(settings['HTTPCACHE_DBM_MODULE'])
self.db = None
def open_spider(self, spider):
dbpath = os.path.join(self.cachedir, '%s.db' % spider.name)
self.db = self.dbmodule.open(dbpath, 'c')
logger.debug("Using DBM cache storage in %(cachepath)s" % {'cachepath': dbpath}, extra={'spider': spider})
def close_spider(self, spider):
self.db.close()
def retrieve_response(self, spider, request):
data = self._read_data(spider, request)
if data is None:
return # not cached
url = data['url']
status = data['status']
headers = Headers(data['headers'])
body = data['body']
respcls = responsetypes.from_args(headers=headers, url=url)
response = respcls(url=url, headers=headers, status=status, body=body)
return response
def store_response(self, spider, request, response):
key = self._request_key(request)
data = {
'status': response.status,
'url': response.url,
'headers': dict(response.headers),
'body': response.body,
}
self.db['%s_data' % key] = pickle.dumps(data, protocol=2)
self.db['%s_time' % key] = str(time())
def _read_data(self, spider, request):
key = self._request_key(request)
db = self.db
tkey = '%s_time' % key
if tkey not in db:
return # not found
ts = db[tkey]
if 0 < self.expiration_secs < time() - float(ts):
return # expired
return pickle.loads(db['%s_data' % key])
def _request_key(self, request):
return request_fingerprint(request)
class FilesystemCacheStorage(object):
def __init__(self, settings):
self.cachedir = data_path(settings['HTTPCACHE_DIR'])
self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
self.use_gzip = settings.getbool('HTTPCACHE_GZIP')
self._open = gzip.open if self.use_gzip else open
def open_spider(self, spider):
logger.debug("Using filesystem cache storage in %(cachedir)s" % {'cachedir': self.cachedir},
extra={'spider': spider})
def close_spider(self, spider):
pass
def retrieve_response(self, spider, request):
"""Return response if present in cache, or None otherwise."""
metadata = self._read_meta(spider, request)
if metadata is None:
return # not cached
rpath = self._get_request_path(spider, request)
with self._open(os.path.join(rpath, 'response_body'), 'rb') as f:
body = f.read()
with self._open(os.path.join(rpath, 'response_headers'), 'rb') as f:
rawheaders = f.read()
url = metadata.get('response_url')
status = metadata['status']
headers = Headers(headers_raw_to_dict(rawheaders))
respcls = responsetypes.from_args(headers=headers, url=url)
response = respcls(url=url, headers=headers, status=status, body=body)
return response
def store_response(self, spider, request, response):
"""Store the given response in the cache."""
rpath = self._get_request_path(spider, request)
if not os.path.exists(rpath):
os.makedirs(rpath)
metadata = {
'url': request.url,
'method': request.method,
'status': response.status,
'response_url': response.url,
'timestamp': time(),
}
with self._open(os.path.join(rpath, 'meta'), 'wb') as f:
f.write(to_bytes(repr(metadata)))
with self._open(os.path.join(rpath, 'pickled_meta'), 'wb') as f:
pickle.dump(metadata, f, protocol=2)
with self._open(os.path.join(rpath, 'response_headers'), 'wb') as f:
f.write(headers_dict_to_raw(response.headers))
with self._open(os.path.join(rpath, 'response_body'), 'wb') as f:
f.write(response.body)
with self._open(os.path.join(rpath, 'request_headers'), 'wb') as f:
f.write(headers_dict_to_raw(request.headers))
with self._open(os.path.join(rpath, 'request_body'), 'wb') as f:
f.write(request.body)
def _get_request_path(self, spider, request):
key = request_fingerprint(request)
return os.path.join(self.cachedir, spider.name, key[0:2], key)
def _read_meta(self, spider, request):
rpath = self._get_request_path(spider, request)
metapath = os.path.join(rpath, 'pickled_meta')
if not os.path.exists(metapath):
return # not found
mtime = os.stat(metapath).st_mtime
if 0 < self.expiration_secs < time() - mtime:
return # expired
with self._open(metapath, 'rb') as f:
return pickle.load(f)
class LeveldbCacheStorage(object):
def __init__(self, settings):
import leveldb
self._leveldb = leveldb
self.cachedir = data_path(settings['HTTPCACHE_DIR'], createdir=True)
self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
self.db = None
def open_spider(self, spider):
dbpath = os.path.join(self.cachedir, '%s.leveldb' % spider.name)
self.db = self._leveldb.LevelDB(dbpath)
logger.debug("Using LevelDB cache storage in %(cachepath)s" % {'cachepath': dbpath}, extra={'spider': spider})
def close_spider(self, spider):
        # Do compaction each time to save space and also recreate files to
# avoid them being removed in storages with timestamp-based autoremoval.
self.db.CompactRange()
del self.db
def retrieve_response(self, spider, request):
data = self._read_data(spider, request)
if data is None:
return # not cached
url = data['url']
status = data['status']
headers = Headers(data['headers'])
body = data['body']
respcls = responsetypes.from_args(headers=headers, url=url)
response = respcls(url=url, headers=headers, status=status, body=body)
return response
def store_response(self, spider, request, response):
key = self._request_key(request)
data = {
'status': response.status,
'url': response.url,
'headers': dict(response.headers),
'body': response.body,
}
batch = self._leveldb.WriteBatch()
batch.Put(key + b'_data', pickle.dumps(data, protocol=2))
batch.Put(key + b'_time', to_bytes(str(time())))
self.db.Write(batch)
def _read_data(self, spider, request):
key = self._request_key(request)
try:
ts = self.db.Get(key + b'_time')
except KeyError:
return # not found or invalid entry
if 0 < self.expiration_secs < time() - float(ts):
return # expired
try:
data = self.db.Get(key + b'_data')
except KeyError:
return # invalid entry
else:
return pickle.loads(data)
def _request_key(self, request):
return to_bytes(request_fingerprint(request))
def parse_cachecontrol(header):
"""Parse Cache-Control header
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
>>> parse_cachecontrol(b'public, max-age=3600') == {b'public': None,
... b'max-age': b'3600'}
True
>>> parse_cachecontrol(b'') == {}
True
"""
directives = {}
for directive in header.split(b','):
key, sep, val = directive.strip().partition(b'=')
if key:
directives[key.lower()] = val if sep else None
return directives
def rfc1123_to_epoch(date_str):
try:
date_str = to_unicode(date_str, encoding='ascii')
return mktime_tz(parsedate_tz(date_str))
except Exception:
return None
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import random
import glob
import os
import sys
import time
import math
import numpy
import pylab
N_MONTH = 4
N_DAY_PER_MONTH = 31
BASE_MONTH = 4
TYPE_LENGTH = 4
class User(object):
def __init__(self, id, info):
        self.id = id
self.brands = info.keys()
self.data = dict()
self.test_label = set()
self.train_label = set()
self.weight = [1, 1, 1, 1]
for brandID in self.brands:
brand = info[brandID]
for month, day, action in brand:
                # bucket each month into three roughly ten-day periods
                p = (month - BASE_MONTH) * 12
                if day > 20:
                    p += 8
                elif day > 10:
                    p += 4
if month == BASE_MONTH + N_MONTH - 1:
if action == 1:
self.test_label.add(brandID)
else:
if not brandID in self.data:
self.data[brandID] = [0, 0, 0, 0]
self.data[brandID][action] += 1
self.weight[action] += 1
self.train_label.add(brandID)
total_buy_action = float(self.weight[1])
self.weight = [1 / (self.weight[idx] / total_buy_action) for idx, num in enumerate(self.weight)]
for brand in self.data.keys():
self.data[brand] = [num * weight for num, weight in zip(self.data[brand], self.weight)]
def __str__(self):
        return str(self.id) + ' ' + str(len(self.brands))
if __name__ == '__main__':
userInfo = dict()
with open('/home/pumpkin/Documents/project/tmall/dataset/t_alibaba_data.csv', 'rb') as csvfile:
# with open('/home/pumpkin/Documents/project/tmall/dataset/demo.csv', 'rb') as csvfile:
user_table = dict()
brand_table = dict()
user_counter = 0
brand_counter = 0
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
userID, brandID, actionType, month, day = [int(field) for field in row]
if not userID in user_table:
user_table[userID] = user_counter
user_counter += 1
if not brandID in brand_table:
brand_table[brandID] = brand_counter
brand_counter += 1
userID = user_table[userID]
brandID = brand_table[brandID]
if not userID in userInfo:
userInfo[userID] = dict()
user = userInfo[userID]
if brandID not in user:
user[brandID] = []
if month in (4, 5, 6):
day = day - 14
else:
day = day - 15
if day <= 0:
month -= 1
day += 31
band = user[brandID]
band.append((month, day, actionType))
users_train = []
for (userID, info) in userInfo.iteritems():
users_train.append(User(userID, info))
item_users = dict()
for user in users_train:
for brand in user.train_label:
if brand not in item_users:
item_users[brand] = set()
item_users[brand].add(user)
overlap = dict()
n_brands = dict()
for brand, users in item_users.items():
for u in users:
if u not in n_brands:
n_brands[u] = 0
if u not in overlap:
overlap[u] = dict()
n_brands[u] += 1
for v in users:
if u is v:
continue
if v not in overlap[u]:
overlap[u][v] = 0
overlap[u][v] += 1
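    # User-user similarity: cosine over the brand sets of each pair of users,
    # w[u][v] = overlap(u, v) / sqrt(n_brands[u] * n_brands[v])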
w = dict()
for u, related_users in overlap.items():
if u not in w:
w[u] = dict()
for v, val in related_users.items():
w[u][v] = float(val) / math.sqrt(n_brands[u] * n_brands[v])
for top_n_user in (5, 10, 20, 40, 80, 160):
for top_n_brand in (5, 10, 20, 40, 80, 160):
pBands = []
bBands = []
hitBands = []
for user in users_train:
bBands.append(len(user.test_label))
if user not in w:
continue
rank = dict()
u_brands = user.train_label
for v, w_uv, in sorted(w[user].items(), key=lambda e: e[1], reverse=True)[0:top_n_user]:
for v_brand in v.train_label:
if v_brand in u_brands:
continue
if v_brand not in rank:
rank[v_brand] = 0
rank[v_brand] += w_uv * sum(v.data[v_brand])
hit = 0
total = top_n_brand
for brand, prob in sorted(rank.items(), key=lambda e: e[1], reverse=True)[0:top_n_brand]:
if brand in user.test_label:
hit += 1
hitBands.append(hit)
pBands.append(total)
print sum(hitBands), ' ', sum(pBands), ' ', sum(bBands)
precision = float(sum(hitBands)) / sum(pBands) if not sum(pBands) == 0 else 0
recall = float(sum(hitBands)) / sum(bBands) if not sum(bBands) == 0 else 0
f1 = (2 * precision * recall) / (precision + recall) if not precision + recall == 0 else 0
print 'All(%d %d): %.02f%% (Precision) %.02f%% (Recall) %.02f%% (F1)' % (top_n_user, top_n_brand, precision * 100, recall * 100, f1 * 100)
|
test.compile("source.cpp")
test.run_analysis_script('gcTypes', upto='gcFunctions')
# The suppressions file uses only mangled names since it's for internal use,
# though I may change that soon given (1) the unfortunate non-uniqueness of
# mangled constructor names, and (2) the usefulness of this file for
# mrgiggles's reporting.
suppressed = test.load_suppressed_functions()
# Only one of these is fully suppressed (ie, *always* called within the scope
# of an AutoSuppressGC).
assert(len(filter(lambda f: 'suppressedFunction' in f, suppressed)) == 1)
assert(len(filter(lambda f: 'halfSuppressedFunction' in f, suppressed)) == 0)
assert(len(filter(lambda f: 'unsuppressedFunction' in f, suppressed)) == 0)
# gcFunctions should be the inverse, but we get to rely on unmangled names here.
gcFunctions = test.load_gcFunctions()
print(gcFunctions)
assert('void GC()' in gcFunctions)
assert('void suppressedFunction()' not in gcFunctions)
assert('void halfSuppressedFunction()' in gcFunctions)
assert('void unsuppressedFunction()' in gcFunctions)
assert('void f()' in gcFunctions)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-msg=C0103
##***** BEGIN LICENSE BLOCK *****
##Version: MPL 1.1
##
##The contents of this file are subject to the Mozilla Public License Version
##1.1 (the "License"); you may not use this file except in compliance with
##the License. You may obtain a copy of the License at
##http:##www.mozilla.org/MPL/
##
##Software distributed under the License is distributed on an "AS IS" basis,
##WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
##for the specific language governing rights and limitations under the
##License.
##
##The Original Code is the AllegroGraph Java Client interface.
##
##The Original Code was written by Franz Inc.
##Copyright (C) 2006 Franz Inc. All Rights Reserved.
##
##***** END LICENSE BLOCK *****
from __future__ import absolute_import
from ..exceptions import ServerException
from ..repository.repository import Repository
from ...miniclient import repository as miniserver
import urllib
READ_ONLY = 'READ_ONLY'
LEGAL_OPTION_TYPES = {READ_ONLY: bool,}
# * A Sesame Sail contains RDF data that can be queried and updated.
# * Access to the Sail can be acquired by opening a connection to it.
# * This connection can then be used to query and/or update the contents of the
# * repository. Depending on the implementation of the repository, it may or may
# * not support multiple concurrent connections.
# * <p>
# * Please note that a Sail needs to be initialized before it can be used
# * and that it should be shut down before it is discarded/garbage collected.
class AllegroGraphServer(object):
"""
Connects to a remote AllegroGraph HTTP Server
"""
# EXPECTED_UNIQUE_RESOURCES = "EXPECTED_UNIQUE_RESOURCES"
# WITH_INDICES = "WITH_INDICES"
# INCLUDE_STANDARD_PARTS = "INCLUDE_STANDARD_PARTS"
# INDIRECT_HOST = "INDIRECT_HOST"
# INDIRECT_PORT = "INDIRECT_PORT"
def __init__(self, host, port=4567, **options):
self.host = host
self.port = port
self.username = None
self.password = None
self.open_catalogs = []
self.options = options
self.translated_options = None
def _get_address(self):
return "http://%s:%s" % (self.host, self.port)
def getHost(self):
return self.host
def getOptions(self):
return self.options
def _long_catalog_name_to_short_name(self, longName):
pos = longName.rfind('/')
shortName = urllib.unquote_plus(longName[pos + 1:])
return shortName
def listCatalogs(self):
catNames = []
for longName in miniserver.listCatalogs(self._get_address()):
catNames.append(self._long_catalog_name_to_short_name(longName))
return catNames
def openCatalog(self, shortName):
"""
Open a catalog named 'catalogName'.
"""
if not shortName in self.listCatalogs():
raise ServerException("There is no catalog named '%s'" % shortName)
for cat in self.open_catalogs:
if cat.getName() == shortName:
return cat
for longName in miniserver.listCatalogs(self._get_address()):
internalShortName = self._long_catalog_name_to_short_name(longName)
if shortName == internalShortName:
break ## 'longName' is now set
miniCatalog = miniserver.openCatalog(self._get_address(), longName, user=self.username, password=self.password)
catalog = Catalog(shortName, miniCatalog, self)
return catalog
class Catalog(object):
"""
Container of multiple repositories (triple stores).
"""
def __init__(self, short_name, mini_catalog, server):
self.server = server
self.mini_catalog = mini_catalog
self.short_name = short_name
self.is_closed = False
def getName(self):
return self.short_name
def listRepositories(self):
"""
Return a list of names of repositories (triple stores) managed by
this catalog.
"""
def remove_double_quotes(name):
if name.startswith('"'):
name = name[1:-1]
return name
return [remove_double_quotes(rep) for rep in self.mini_catalog.listTripleStores()]
def getRepository(self, name, access_verb, multi_threaded_mode=False):
return Repository(self, name, access_verb, multi_threaded_mode=multi_threaded_mode)
def close(self):
if not self.is_closed:
self.server.open_catalogs.remove(self)
self.is_closed = True
|